/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>

#include <linux/delay.h>

#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"
static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.MpTcbMem = (struct tcb *)
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.MpTcbMem) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate memory for the Tx status block */
	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(TX_STATUS_BLOCK_t),
						    &tx_ring->pTxStatusPa);
	if (!adapter->tx_ring.pTxStatusPa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}

	/* Allocate memory for a dummy buffer */
	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
						      NIC_MIN_PACKET_SIZE,
						      &tx_ring->pTxDummyBlkPa);
	if (!adapter->tx_ring.pTxDummyBlkPa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx dummy buffer\n");
		return -ENOMEM;
	}

	return 0;
}
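/*
 * Illustrative sketch (not called by the driver): the "+ 4096 - 1" padding
 * above exists so the descriptor ring base can later be rounded up to a 4k
 * boundary. Assuming the virtual and physical addresses are adjusted by the
 * same offset, the rounding itself is just:
 */
static inline dma_addr_t example_align_4k(dma_addr_t pa)
{
	/* round up to the next 4096-byte boundary */
	return (pa + 4096 - 1) & ~((dma_addr_t)4096 - 1);
}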
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
								+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.pTxStatusVa) {
		pci_free_consistent(adapter->pdev,
				    sizeof(TX_STATUS_BLOCK_t),
				    adapter->tx_ring.pTxStatusVa,
				    adapter->tx_ring.pTxStatusPa);
		adapter->tx_ring.pTxStatusVa = NULL;
	}

	/* Free memory for the dummy buffer */
	if (adapter->tx_ring.pTxDummyBlkVa) {
		pci_free_consistent(adapter->pdev,
				    NIC_MIN_PACKET_SIZE,
				    adapter->tx_ring.pTxDummyBlkVa,
				    adapter->tx_ring.pTxDummyBlkPa);
		adapter->tx_ring.pTxDummyBlkVa = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.MpTcbMem);
}
/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) etdev->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

	/* Load the completion writeback physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	writel(0, &txdma->dma_wb_base_hi);
	writel(etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);

	memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

	writel(0, &txdma->service_request);
	etdev->tx_ring.txDmaReadyToSend = 0;
}
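/*
 * Illustrative sketch (not called by the driver): if pci_alloc_consistent()
 * ever handed back a DAC (64-bit) address, the NOTE blocks above say the high
 * half must be preserved.  One hedged way to split a dma_addr_t across the
 * hi/lo register pair used in ConfigTxDmaRegs() is shown below; the register
 * names are the ones assumed above.
 */
static inline void example_write_dma_addr(struct _TXDMA_t __iomem *txdma,
					  dma_addr_t addr)
{
	/* upper_32_bits() is safe even when dma_addr_t is only 32 bits wide */
	writel(upper_32_bits(addr), &txdma->pr_base_hi);
	writel(lower_32_bits(addr), &txdma->pr_base_lo);
}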
/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &etdev->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &etdev->regs->txdma.csr);
}
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 count;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.MpTcbMem;
	tx_ring->TCBReadyQueueHead = tcb;

	/* Go through and set up each TCB */
	for (count = 0; count < NUM_TCB; count++) {
		memset(tcb, 0, sizeof(struct tcb));

		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain. If this is the last TCB in the chain, also set the
		 * tail pointer.
		 */
		if (count < NUM_TCB - 1) {
			tcb->Next = tcb + 1;
		} else {
			tx_ring->TCBReadyQueueTail = tcb;
			tcb->Next = NULL;
		}
		tcb++;
	}

	/* Curr send queue should now be empty */
	tx_ring->CurrSendHead = NULL;
	tx_ring->CurrSendTail = NULL;

	INIT_LIST_HEAD(&adapter->tx_ring.SendWaitQueue);
}
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its array make no sense here
	 */

	/* Queue is not empty or TCB is not available */
	if (!list_empty(&etdev->tx_ring.SendWaitQueue) ||
	    MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 *
		 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
		 *     etdev->DriverNoPhyAccess )
		 */
		if (MP_SHOULD_FAIL_SEND(etdev) || !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);

			if (status == -ENOMEM) {
				/* NOTE: If there's an error on send, no need
				 * to queue the packet under Linux; if we just
				 * send an error up to the netif layer, it
				 * will resend the skb to us.
				 */
			} else if (status != 0) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);

				etdev->net_stats.tx_dropped++;
			}
		}
	}

	return status;
}
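/*
 * Usage sketch (hypothetical, not part of the driver): et131x_send_packets()
 * is the transmit entry point the netdev layer is expected to call.  The real
 * hookup lives in the driver's netdev setup code; the wrapper below only
 * illustrates the calling convention assumed here, where a non-zero return is
 * reserved for extreme hard failure and asks the stack to retry the skb.
 */
static inline int example_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	if (et131x_send_packets(skb, netdev))
		return NETDEV_TX_BUSY;	/* stack will re-queue the skb */

	return NETDEV_TX_OK;		/* skb consumed (sent or dropped) */
}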
/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status = 0;
	struct tcb *tcb = NULL;
	uint16_t *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
	tcb = etdev->tx_ring.TCBReadyQueueHead;
	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->tx_ring.TCBReadyQueueHead = tcb->Next;
	if (etdev->tx_ring.TCBReadyQueueHead == NULL)
		etdev->tx_ring.TCBReadyQueueTail = NULL;
	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	tcb->PacketLength = skb->len;
	tcb->Packet = skb;

	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
		shbufva = (uint16_t *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->Flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->Flags |= fMP_DEST_MULTI;
		}
	}

	tcb->Next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		/* On failure, put the TCB back on the Ready Q */
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
		if (etdev->tx_ring.TCBReadyQueueTail) {
			etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
		} else {
			/* Apparently ready Q is empty. */
			etdev->tx_ring.TCBReadyQueueHead = tcb;
		}
		etdev->tx_ring.TCBReadyQueueTail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return status;
	}

	WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
	return 0;
}
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->Packet;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   skb->len -
						   skb->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data,
						   (skb->len -
						    skb->data_len) / 2,
						   PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
				    (skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
				    pci_map_single(etdev->pdev,
						   skb->data +
						   (skb->len -
						    skb->data_len) / 2,
						   (skb->len -
						    skb->data_len) / 2,
						   PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
			    frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
			    pci_map_page(etdev->pdev,
					 frags[i - 1].page,
					 frags[i - 1].page_offset,
					 frags[i - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
		    PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else {
		desc[frag - 1].flags = 0x5;
	}

	desc[0].flags |= 2;	/* First element flag */

	tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
	tcb->PacketStaleCount = 0;

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->tx_ring.txDmaReadyToSend);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(etdev->tx_ring.tx_desc_ring +
	       INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);

	if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0 ||
	    INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
		etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
		etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(etdev->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
	}

	if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
		if (etdev->tx_ring.txDmaReadyToSend)
			tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->WrIndex =
			    ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->tx_ring.CurrSendTail)
		etdev->tx_ring.CurrSendTail->Next = tcb;
	else
		etdev->tx_ring.CurrSendHead = tcb;

	etdev->tx_ring.CurrSendTail = tcb;

	WARN_ON(tcb->Next != NULL);

	etdev->tx_ring.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.txDmaReadyToSend,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	return 0;
}
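/*
 * Illustrative sketch (not used above): nic_send_packet() writes the
 * descriptor flag field with magic numbers.  Going only by the comments in
 * that routine -- 0x1 marks the last fragment, 0x2 the first, and 0x5 is
 * "last element & interrupt" -- the bits decompose as named below.  These
 * names are local to this sketch and are not used by the driver.
 */
enum example_txdesc_flags {
	EX_TXDESC_LAST	= 0x1,	/* last fragment of the packet */
	EX_TXDESC_FIRST	= 0x2,	/* first fragment of the packet */
	EX_TXDESC_INT	= 0x4	/* raise an interrupt when serviced;
				 * 0x5 == EX_TXDESC_LAST | EX_TXDESC_INT */
};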
/**
 * et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->Flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (tcb->Flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (tcb->Packet) {
		stats->tx_bytes += tcb->Packet->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
						  INDEX10(tcb->WrIndexStart));

			pci_unmap_single(etdev->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->WrIndexStart, 1);
			if (INDEX10(tcb->WrIndexStart) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->WrIndexStart &= ~ET_DMA10_MASK;
				tcb->WrIndexStart ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->tx_ring.tx_desc_ring +
				  INDEX10(tcb->WrIndex)));

		dev_kfree_skb_any(tcb->Packet);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->tx_ring.TCBReadyQueueTail) {
		etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
	} else {
		/* Apparently ready Q is empty. */
		etdev->tx_ring.TCBReadyQueueHead = tcb;
	}

	etdev->tx_ring.TCBReadyQueueTail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->tx_ring.nBusySend < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	struct tcb *tcb;
	struct list_head *entry;
	unsigned long flags;
	u32 freed = 0;

	while (!list_empty(&etdev->tx_ring.SendWaitQueue)) {
		spin_lock_irqsave(&etdev->SendWaitLock, flags);
		etdev->tx_ring.nWaitSend--;
		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

		entry = etdev->tx_ring.SendWaitQueue.next;
	}

	etdev->tx_ring.nWaitSend = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.CurrSendHead;

	while ((tcb != NULL) && (freed < NUM_TCB)) {
		struct tcb *pNext = tcb->Next;

		etdev->tx_ring.CurrSendHead = pNext;
		if (pNext == NULL)
			etdev->tx_ring.CurrSendTail = NULL;

		etdev->tx_ring.nBusySend--;
		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		freed++;
		et131x_free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
		tcb = etdev->tx_ring.CurrSendHead;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->tx_ring.nBusySend = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	/* Mark as completed any packets which have been sent by the device. */
	et131x_update_tcb_list(etdev);

	/* If we queued any transmits because we didn't have any TCBs earlier,
	 * dequeue and send those packets now, as long as we have free TCBs.
	 */
	et131x_check_send_wait_list(etdev);
}
/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Re-claims the send resources and completes sends. Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * been received.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.CurrSendHead;

	while (tcb &&
	       ((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->WrIndex)) {
		etdev->tx_ring.nBusySend--;
		etdev->tx_ring.CurrSendHead = tcb->Next;
		if (tcb->Next == NULL)
			etdev->tx_ring.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.CurrSendHead;
	}
	while (tcb &&
	       !((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
	       && index > (tcb->WrIndex & ET_DMA10_MASK)) {
		etdev->tx_ring.nBusySend--;
		etdev->tx_ring.CurrSendHead = tcb->Next;
		if (tcb->Next == NULL)
			etdev->tx_ring.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		tcb = etdev->tx_ring.CurrSendHead;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}
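/*
 * Illustrative sketch (not used above): the two while loops in
 * et131x_update_tcb_list() express one question -- "has the hardware serviced
 * this TCB's last descriptor yet?" -- split across the wrapped and
 * non-wrapped cases of the 10-bit ring index.  A hedged restatement as a
 * single predicate, assuming the same INDEX10()/ET_DMA10_WRAP convention:
 */
static inline int example_tcb_serviced(u32 serviced, u32 wrindex)
{
	u32 index = INDEX10(serviced);

	if ((serviced ^ wrindex) & ET_DMA10_WRAP)
		/* completion pointer has wrapped past this TCB's generation */
		return index < INDEX10(wrindex);

	/* same wrap generation: completion index must be strictly beyond it */
	return index > (wrindex & ET_DMA10_MASK);
}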
/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * TCBs are available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
	unsigned long flags;

	spin_lock_irqsave(&etdev->SendWaitLock, flags);

	while (!list_empty(&etdev->tx_ring.SendWaitQueue) &&
	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
		struct list_head *entry;

		entry = etdev->tx_ring.SendWaitQueue.next;

		etdev->tx_ring.nWaitSend--;
	}

	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}