3 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
5 * Copyright © 2005 Agere Systems Inc.
9 *------------------------------------------------------------------------------
11 * et1310_tx.c - Routines used to perform data transmission.
13 *------------------------------------------------------------------------------
17 * This software is provided subject to the following terms and conditions,
18 * which you should read carefully before using the software. Using this
19 * software indicates your acceptance of these terms and conditions. If you do
20 * not agree with these terms and conditions, do not use the software.
22 * Copyright © 2005 Agere Systems Inc.
23 * All rights reserved.
25 * Redistribution and use in source or binary forms, with or without
26 * modifications, are permitted provided that the following conditions are met:
28 * . Redistributions of source code must retain the above copyright notice, this
29 * list of conditions and the following Disclaimer as comments in the code as
30 * well as in the documentation and/or other materials provided with the
33 * . Redistributions in binary form must reproduce the above copyright notice,
34 * this list of conditions and the following Disclaimer in the documentation
35 * and/or other materials provided with the distribution.
37 * . Neither the name of Agere Systems Inc. nor the names of the contributors
38 * may be used to endorse or promote products derived from this software
39 * without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
46 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
58 #include "et131x_version.h"
59 #include "et131x_debug.h"
60 #include "et131x_defs.h"
62 #include <linux/pci.h>
63 #include <linux/init.h>
64 #include <linux/module.h>
65 #include <linux/types.h>
66 #include <linux/kernel.h>
68 #include <linux/sched.h>
69 #include <linux/ptrace.h>
70 #include <linux/slab.h>
71 #include <linux/ctype.h>
72 #include <linux/string.h>
73 #include <linux/timer.h>
74 #include <linux/interrupt.h>
76 #include <linux/delay.h>
78 #include <linux/bitops.h>
79 #include <asm/system.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/if_arp.h>
85 #include <linux/ioport.h>
87 #include "et1310_phy.h"
88 #include "et1310_pm.h"
89 #include "et1310_jagcore.h"
91 #include "et131x_adapter.h"
92 #include "et131x_initpci.h"
93 #include "et131x_isr.h"
95 #include "et1310_tx.h"
98 /* Data for debugging facilities */
99 #ifdef CONFIG_ET131X_DEBUG
100 extern dbg_info_t *et131x_dbginfo;
101 #endif /* CONFIG_ET131X_DEBUG */
/* Forward declarations of file-local helpers used before their definitions.
 *
 * NOTE(review): this listing appears to have dropped source lines (the
 * embedded original line numbers are not contiguous). In particular the
 * et131x_free_send_packet() prototype below is missing its second
 * parameter line (presumably "PMP_TCB pMpTcb") -- confirm against the
 * complete source file.
 */
103 static void et131x_update_tcb_list(struct et131x_adapter *etdev);
104 static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
105 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
107 static int et131x_send_packet(struct sk_buff *skb,
108 struct et131x_adapter *etdev);
109 static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
/**
 * et131x_tx_dma_memory_alloc - allocate all Tx-side DMA-visible memory
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU:
 * the TCB array (kcalloc), the Tx descriptor ring, the Tx status block and
 * a dummy pad buffer (all via pci_alloc_consistent). The OS will pass us
 * packets, pointers to which we will insert in the Tx descriptor queue.
 * The device will read this queue to find the packets in memory, and will
 * update the "status" in memory each time it xmits a packet.
 *
 * NOTE(review): the listing below is missing lines (embedded numbering is
 * non-contiguous) -- the "return -ENOMEM;" style error exits after each
 * failed allocation, the closing braces, and the final "return 0;" are
 * not visible here. Confirm against the complete source.
 */
123 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
126 TX_RING_t *tx_ring = &adapter->TxRing;
128 DBG_ENTER(et131x_dbginfo);
130 /* Allocate memory for the TCB's (Transmit Control Block) */
131 adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
132 GFP_ATOMIC | GFP_DMA);
133 if (!adapter->TxRing.MpTcbMem) {
134 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
135 DBG_LEAVE(et131x_dbginfo);
139 /* Allocate enough memory for the Tx descriptor ring, and allocate
140 * some extra so that the ring can be aligned on a 4k boundary.
142 desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
143 tx_ring->pTxDescRingVa =
144 (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
145 &tx_ring->pTxDescRingPa);
146 if (!adapter->TxRing.pTxDescRingVa) {
147 DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
148 DBG_LEAVE(et131x_dbginfo);
152 /* Save physical address
154 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
155 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
156 * are ever returned, make sure the high part is retrieved here before
157 * storing the adjusted address.
159 tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;
161 /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
162 et131x_align_allocated_memory(adapter,
163 &tx_ring->pTxDescRingAdjustedPa,
164 &tx_ring->TxDescOffset, 0x0FFF);
166 tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
168 /* Allocate memory for the Tx status block */
169 tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
170 sizeof(TX_STATUS_BLOCK_t),
171 &tx_ring->pTxStatusPa);
172 if (!adapter->TxRing.pTxStatusPa) {
173 DBG_ERROR(et131x_dbginfo,
174 "Cannot alloc memory for Tx status block\n");
175 DBG_LEAVE(et131x_dbginfo);
179 /* Allocate memory for a dummy buffer */
180 tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
182 &tx_ring->pTxDummyBlkPa);
183 if (!adapter->TxRing.pTxDummyBlkPa) {
184 DBG_ERROR(et131x_dbginfo,
185 "Cannot alloc memory for Tx dummy buffer\n");
186 DBG_LEAVE(et131x_dbginfo);
190 DBG_LEAVE(et131x_dbginfo);
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 *
 * Reverses et131x_tx_dma_memory_alloc(): un-adjusts the descriptor ring
 * pointer by the alignment offset, then frees the ring, the status block,
 * the dummy buffer (pci_free_consistent) and the TCB array (kfree).
 * (The original comment claimed an errno return, but the function is void.)
 *
 * NOTE(review): this listing is missing lines -- e.g. the statement that
 * computes the freed ring size (only its second half, the
 * "(sizeof(...) * NUM_DESC_PER_RING_TX) + 4096 - 1" expression, is
 * visible) and several closing braces. Confirm against the full source.
 */
200 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
204 DBG_ENTER(et131x_dbginfo);
206 if (adapter->TxRing.pTxDescRingVa) {
207 /* Free memory relating to Tx rings here */
208 adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
211 (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
213 pci_free_consistent(adapter->pdev,
215 adapter->TxRing.pTxDescRingVa,
216 adapter->TxRing.pTxDescRingPa);
218 adapter->TxRing.pTxDescRingVa = NULL;
221 /* Free memory for the Tx status block */
222 if (adapter->TxRing.pTxStatusVa) {
223 pci_free_consistent(adapter->pdev,
224 sizeof(TX_STATUS_BLOCK_t),
225 adapter->TxRing.pTxStatusVa,
226 adapter->TxRing.pTxStatusPa);
228 adapter->TxRing.pTxStatusVa = NULL;
231 /* Free memory for the dummy buffer */
232 if (adapter->TxRing.pTxDummyBlkVa) {
233 pci_free_consistent(adapter->pdev,
235 adapter->TxRing.pTxDummyBlkVa,
236 adapter->TxRing.pTxDummyBlkPa);
238 adapter->TxRing.pTxDummyBlkVa = NULL;
241 /* Free the memory for MP_TCB structures */
242 kfree(adapter->TxRing.MpTcbMem);
244 DBG_LEAVE(et131x_dbginfo);
/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 *
 * Programs the Tx DMA engine: descriptor ring base address (hi/lo),
 * number of descriptors, completion-writeback (status block) address,
 * then zeroes the status block, the service-request register and the
 * software ready-to-send index.
 *
 * NOTE(review): the register-destination argument lines of the first two
 * writel() calls (presumably &txdma->pr_base_hi / &txdma->pr_base_lo)
 * are missing from this listing -- confirm against the full source.
 */
251 void ConfigTxDmaRegs(struct et131x_adapter *etdev)
253 struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;
255 DBG_ENTER(et131x_dbginfo);
257 /* Load the hardware with the start of the transmit descriptor ring. */
258 writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
260 writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
263 /* Initialise the transmit DMA engine */
264 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);
266 /* Load the completion writeback physical address
268 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
269 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
270 * are ever returned, make sure the high part is retrieved here before
271 * storing the adjusted address.
273 writel(0, &txdma->dma_wb_base_hi);
274 writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);
276 memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
278 writel(0, &txdma->service_request.value);
279 etdev->TxRing.txDmaReadyToSend.value = 0;
281 DBG_LEAVE(et131x_dbginfo);
/**
 * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
 * @etdev: pointer to our adapter structure
 *
 * Writes 0x101 to the Tx DMA CSR; presumably this sets the halt-request
 * bit plus a cache-threshold field -- confirm against the TXDMA_CSR_t
 * bit layout in the register header.
 */
288 void et131x_tx_dma_disable(struct et131x_adapter *etdev)
290 DBG_ENTER(et131x_dbginfo);
292 /* Setup the transmit dma configuration register */
293 writel(0x101, &etdev->regs->txdma.csr.value);
295 DBG_LEAVE(et131x_dbginfo);
/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower
 * state. In PHY-loopback mode the engine is left disabled (same 0x101
 * value as et131x_tx_dma_disable); otherwise the CSR is rebuilt from a
 * zeroed TXDMA_CSR_t with single-end-of-packet mode and the configured
 * cache threshold.
 */
304 void et131x_tx_dma_enable(struct et131x_adapter *etdev)
306 DBG_ENTER(et131x_dbginfo);
308 if (etdev->RegistryPhyLoopbk) {
309 /* TxDMA is disabled for loopback operation. */
310 writel(0x101, &etdev->regs->txdma.csr.value);
312 TXDMA_CSR_t csr = { 0 };
314 /* Setup the transmit dma configuration register for normal
317 csr.bits.sngl_epkt_mode = 1;
319 csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
320 writel(csr.value, &etdev->regs->txdma.csr.value);
323 DBG_LEAVE(et131x_dbginfo);
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 *
 * Zeroes every TCB and links them into a singly-linked free list
 * (TCBReadyQueueHead/Tail), empties the in-flight send queue
 * (CurrSendHead/Tail) and initializes the overflow wait-list.
 *
 * NOTE(review): local declarations (tx_ring, pMpTcb, TcbCount) and the
 * loop's pMpTcb advance are among the lines dropped from this listing.
 */
330 void et131x_init_send(struct et131x_adapter *adapter)
336 DBG_ENTER(et131x_dbginfo);
338 /* Setup some convenience pointers */
339 tx_ring = &adapter->TxRing;
340 pMpTcb = adapter->TxRing.MpTcbMem;
342 tx_ring->TCBReadyQueueHead = pMpTcb;
344 /* Go through and set up each TCB */
345 for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
346 memset(pMpTcb, 0, sizeof(MP_TCB));
348 /* Set the link pointer in HW TCB to the next TCB in the
349 * chain. If this is the last TCB in the chain, also set the
352 if (TcbCount < NUM_TCB - 1) {
353 pMpTcb->Next = pMpTcb + 1;
355 tx_ring->TCBReadyQueueTail = pMpTcb;
356 pMpTcb->Next = (PMP_TCB) NULL;
362 /* Curr send queue should now be empty */
363 tx_ring->CurrSendHead = (PMP_TCB) NULL;
364 tx_ring->CurrSendTail = (PMP_TCB) NULL;
366 INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
368 DBG_LEAVE(et131x_dbginfo);
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 *
 * Flow: if TCBs are exhausted or packets are already queued, the skb is
 * not accepted (netif will retry); if the link is down the skb is freed
 * and counted as dropped; otherwise it is handed to et131x_send_packet().
 * An -ENOMEM from the send path is reported up for netif to retry; any
 * other error frees the skb and bumps tx_dropped.
 */
378 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
381 struct et131x_adapter *etdev = NULL;
383 DBG_TX_ENTER(et131x_dbginfo);
385 etdev = netdev_priv(netdev);
387 /* Send these packets
389 * NOTE: The Linux Tx entry point is only given one packet at a time
390 * to Tx, so the PacketCount and it's array used makes no sense here
393 /* Queue is not empty or TCB is not available */
394 if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
395 MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
396 /* NOTE: If there's an error on send, no need to queue the
397 * packet under Linux; if we just send an error up to the
398 * netif layer, it will resend the skb to us.
400 DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
403 /* We need to see if the link is up; if it's not, make the
404 * netif layer think we're good and drop the packet
407 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
408 * etdev->DriverNoPhyAccess )
410 if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
411 || !netif_carrier_ok(netdev)) {
412 DBG_VERBOSE(et131x_dbginfo,
413 "Can't Tx, Link is DOWN; drop the packet\n");
415 dev_kfree_skb_any(skb);
418 etdev->net_stats.tx_dropped++;
420 status = et131x_send_packet(skb, etdev);
422 if (status == -ENOMEM) {
424 /* NOTE: If there's an error on send, no need
425 * to queue the packet under Linux; if we just
426 * send an error up to the netif layer, it
427 * will resend the skb to us.
429 DBG_WARNING(et131x_dbginfo,
430 "Resources problem, Queue tx packet\n");
431 } else if (status != 0) {
432 /* On any other error, make netif think we're
433 * OK and drop the packet
435 DBG_WARNING(et131x_dbginfo,
436 "General error, drop packet\n");
438 dev_kfree_skb_any(skb);
441 etdev->net_stats.tx_dropped++;
446 DBG_TX_LEAVE(et131x_dbginfo);
/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 *
 * Pops a free TCB from the ready queue (under TCBReadyQLock), records the
 * skb and its length, classifies the destination as broadcast/multicast by
 * inspecting the first 6 bytes of the header, then delegates to
 * nic_send_packet(). On nic_send_packet() failure the TCB is pushed back
 * onto the ready queue.
 *
 * NOTE(review): error-return statements and several closing braces are
 * missing from this listing (embedded numbering skips them).
 */
459 static int et131x_send_packet(struct sk_buff *skb,
460 struct et131x_adapter *etdev)
463 PMP_TCB pMpTcb = NULL;
467 DBG_TX_ENTER(et131x_dbginfo);
469 /* Is our buffer scattered, or continuous? */
470 if (skb_shinfo(skb)->nr_frags == 0) {
471 DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
473 DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
474 skb_shinfo(skb)->nr_frags);
477 /* All packets must have at least a MAC address and a protocol type */
478 if (skb->len < ETH_HLEN) {
479 DBG_ERROR(et131x_dbginfo,
480 "Packet size < ETH_HLEN (14 bytes)\n");
481 DBG_LEAVE(et131x_dbginfo);
485 /* Get a TCB for this packet */
486 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
488 pMpTcb = etdev->TxRing.TCBReadyQueueHead;
490 if (pMpTcb == NULL) {
491 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
493 DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
494 DBG_TX_LEAVE(et131x_dbginfo);
498 etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;
500 if (etdev->TxRing.TCBReadyQueueHead == NULL)
501 etdev->TxRing.TCBReadyQueueTail = NULL;
503 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
505 pMpTcb->PacketLength = skb->len;
506 pMpTcb->Packet = skb;
508 if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
509 shbufva = (uint16_t *) skb->data;
511 if ((shbufva[0] == 0xffff) &&
512 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
513 MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
514 } else if ((shbufva[0] & 0x3) == 0x0001) {
515 MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
521 /* Call the NIC specific send handler. */
523 status = nic_send_packet(etdev, pMpTcb);
/* On failure, return this TCB to the free (ready) queue. */
526 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
528 if (etdev->TxRing.TCBReadyQueueTail) {
529 etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
531 /* Apparently ready Q is empty. */
532 etdev->TxRing.TCBReadyQueueHead = pMpTcb;
535 etdev->TxRing.TCBReadyQueueTail = pMpTcb;
537 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
539 DBG_TX_LEAVE(et131x_dbginfo);
543 DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);
545 DBG_TX_LEAVE(et131x_dbginfo);
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 *
 * Builds up to 24 Tx descriptors on the stack (first skb fragment is
 * split across two descriptors when it exceeds 1514 bytes; page frags map
 * one-to-one), sets the interrupt/last-packet bits in word3 (coalesced at
 * gigabit via TxPacketsSinceLastinterrupt), copies the descriptors into
 * the ring -- wrapping in two memcpy()s when needed -- queues the TCB on
 * the in-flight list and kicks the hardware service-request register.
 *
 * NOTE(review): this listing has dropped lines throughout (e.g. the
 * second/third arguments of several pci_map_single() calls, the "else"
 * lines, some closing braces, the "thiscopy =" assignment head and the
 * final return). Comments below describe only what is visible.
 */
556 static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
559 TX_DESC_ENTRY_t CurDesc[24];
560 uint32_t FragmentNumber = 0;
561 uint32_t thiscopy, remainder;
562 struct sk_buff *pPacket = pMpTcb->Packet;
563 uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
564 struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
567 DBG_TX_ENTER(et131x_dbginfo);
569 /* Part of the optimizations of this send routine restrict us to
570 * sending 24 fragments at a pass. In practice we should never see
571 * more than 5 fragments.
573 * NOTE: The older version of this function (below) can handle any
574 * number of fragments. If needed, we can call this function,
575 * although it is less efficient.
577 if (FragListCount > 23) {
578 DBG_TX_LEAVE(et131x_dbginfo);
582 memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
584 for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
585 /* If there is something in this element, lets get a
586 * descriptor from the ring and get the necessary data
588 if (loopIndex == 0) {
589 /* If the fragments are smaller than a standard MTU,
590 * then map them to a single descriptor in the Tx
591 * Desc ring. However, if they're larger, as is
592 * possible with support for jumbo packets, then
593 * split them each across 2 descriptors.
595 * This will work until we determine why the hardware
596 * doesn't seem to like large fragments.
598 if ((pPacket->len - pPacket->data_len) <= 1514) {
599 DBG_TX(et131x_dbginfo,
600 "Got packet of length %d, "
601 "filling desc entry %d, "
603 (pPacket->len - pPacket->data_len),
604 etdev->TxRing.txDmaReadyToSend.bits.
607 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
609 CurDesc[FragmentNumber].word2.bits.
611 pPacket->len - pPacket->data_len;
613 /* NOTE: Here, the dma_addr_t returned from
614 * pci_map_single() is implicitly cast as a
615 * uint32_t. Although dma_addr_t can be
616 * 64-bit, the address returned by
617 * pci_map_single() is always 32-bit
618 * addressable (as defined by the pci/dma
621 CurDesc[FragmentNumber++].DataBufferPtrLow =
622 pci_map_single(etdev->pdev,
628 DBG_TX(et131x_dbginfo,
629 "Got packet of length %d, "
630 "filling desc entry %d, "
632 (pPacket->len - pPacket->data_len),
633 etdev->TxRing.txDmaReadyToSend.bits.
636 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
638 CurDesc[FragmentNumber].word2.bits.
640 ((pPacket->len - pPacket->data_len) / 2);
642 /* NOTE: Here, the dma_addr_t returned from
643 * pci_map_single() is implicitly cast as a
644 * uint32_t. Although dma_addr_t can be
645 * 64-bit, the address returned by
646 * pci_map_single() is always 32-bit
647 * addressable (as defined by the pci/dma
650 CurDesc[FragmentNumber++].DataBufferPtrLow =
651 pci_map_single(etdev->pdev,
654 pPacket->data_len) / 2),
656 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
658 CurDesc[FragmentNumber].word2.bits.
660 ((pPacket->len - pPacket->data_len) / 2);
662 /* NOTE: Here, the dma_addr_t returned from
663 * pci_map_single() is implicitly cast as a
664 * uint32_t. Although dma_addr_t can be
665 * 64-bit, the address returned by
666 * pci_map_single() is always 32-bit
667 * addressable (as defined by the pci/dma
670 CurDesc[FragmentNumber++].DataBufferPtrLow =
671 pci_map_single(etdev->pdev,
674 pPacket->data_len) / 2),
676 pPacket->data_len) / 2),
680 DBG_TX(et131x_dbginfo,
681 "Got packet of length %d,"
682 "filling desc entry %d\n"
684 pFragList[loopIndex].size,
685 etdev->TxRing.txDmaReadyToSend.bits.val,
688 CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
690 CurDesc[FragmentNumber].word2.bits.length_in_bytes =
691 pFragList[loopIndex - 1].size;
693 /* NOTE: Here, the dma_addr_t returned from
694 * pci_map_page() is implicitly cast as a uint32_t.
695 * Although dma_addr_t can be 64-bit, the address
696 * returned by pci_map_page() is always 32-bit
697 * addressable (as defined by the pci/dma subsystem)
699 CurDesc[FragmentNumber++].DataBufferPtrLow =
700 pci_map_page(etdev->pdev,
701 pFragList[loopIndex - 1].page,
702 pFragList[loopIndex - 1].page_offset,
703 pFragList[loopIndex - 1].size,
708 if (FragmentNumber == 0) {
709 DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
/* Gigabit: only interrupt every PARM_TX_NUM_BUFS_DEF packets (word3
 * 0x5 = last-packet + interrupt bits, 0x1 = last-packet only --
 * TODO confirm against the TX_DESC word3 bit layout). */
713 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
714 if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
715 PARM_TX_NUM_BUFS_DEF) {
716 CurDesc[FragmentNumber - 1].word3.value = 0x5;
717 etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
719 CurDesc[FragmentNumber - 1].word3.value = 0x1;
722 CurDesc[FragmentNumber - 1].word3.value = 0x5;
725 CurDesc[0].word3.bits.f = 1;
727 pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
728 pMpTcb->PacketStaleCount = 0;
730 spin_lock_irqsave(&etdev->SendHWLock, flags);
733 NUM_DESC_PER_RING_TX - etdev->TxRing.txDmaReadyToSend.bits.val;
735 if (thiscopy >= FragmentNumber) {
737 thiscopy = FragmentNumber;
739 remainder = FragmentNumber - thiscopy;
/* First copy runs to the end of the ring; a second copy below handles
 * the wrapped remainder. */
742 memcpy(etdev->TxRing.pTxDescRingVa +
743 etdev->TxRing.txDmaReadyToSend.bits.val, CurDesc,
744 sizeof(TX_DESC_ENTRY_t) * thiscopy);
746 etdev->TxRing.txDmaReadyToSend.bits.val += thiscopy;
748 if ((etdev->TxRing.txDmaReadyToSend.bits.val == 0) ||
749 (etdev->TxRing.txDmaReadyToSend.bits.val ==
750 NUM_DESC_PER_RING_TX)) {
751 if (etdev->TxRing.txDmaReadyToSend.bits.wrap)
752 etdev->TxRing.txDmaReadyToSend.value = 0;
754 etdev->TxRing.txDmaReadyToSend.value = 0x400;
758 memcpy(etdev->TxRing.pTxDescRingVa,
760 sizeof(TX_DESC_ENTRY_t) * remainder);
762 etdev->TxRing.txDmaReadyToSend.bits.val += remainder;
765 if (etdev->TxRing.txDmaReadyToSend.bits.val == 0) {
766 if (etdev->TxRing.txDmaReadyToSend.value)
767 pMpTcb->WrIndex.value = NUM_DESC_PER_RING_TX - 1;
769 pMpTcb->WrIndex.value =
770 0x400 | (NUM_DESC_PER_RING_TX - 1);
772 pMpTcb->WrIndex.value =
773 etdev->TxRing.txDmaReadyToSend.value - 1;
775 spin_lock(&etdev->TCBSendQLock);
777 if (etdev->TxRing.CurrSendTail)
778 etdev->TxRing.CurrSendTail->Next = pMpTcb;
780 etdev->TxRing.CurrSendHead = pMpTcb;
782 etdev->TxRing.CurrSendTail = pMpTcb;
784 DBG_ASSERT(pMpTcb->Next == NULL);
786 etdev->TxRing.nBusySend++;
788 spin_unlock(&etdev->TCBSendQLock);
790 /* Write the new write pointer back to the device. */
791 writel(etdev->TxRing.txDmaReadyToSend.value,
792 &etdev->regs->txdma.service_request.value);
794 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
795 * timer to wake us up if this packet isn't followed by N more.
797 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
798 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
799 &etdev->regs->global.watchdog_timer);
802 spin_unlock_irqrestore(&etdev->SendHWLock, flags);
804 DBG_TX_LEAVE(et131x_dbginfo);
/*
 * NOTE: For now, keep this older version of NICSendPacket around for
 * reference, even though it's not used.
 *
 * NOTE(review): this is a SECOND definition of static nic_send_packet();
 * two definitions of the same symbol cannot compile, so in the complete
 * source this whole function is presumably fenced by "#if 0 ... #endif"
 * whose guard lines were dropped from this listing -- confirm before use.
 */
/**
 * NICSendPacket - NIC specific send handler.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 on success, errno on failure.
 *
 * This version of the send routine is designed for version A silicon.
 * Assumption - Send spinlock has been acquired.
 *
 * Works around two hardware bugs: never writes an odd number of
 * descriptors, and pads sub-NIC_MIN_PACKET_SIZE packets with a dummy
 * descriptor in half-duplex mode; descriptors are built one at a time and
 * copied into the ring, with free space computed from the service-request
 * vs. service-complete indices. Many interior lines are missing from this
 * listing (else branches, some arguments, braces, return statements).
 */
824 static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
826 uint32_t loopIndex, fragIndex, loopEnd;
827 uint32_t splitfirstelem = 0;
828 uint32_t SegmentSize = 0;
829 TX_DESC_ENTRY_t CurDesc;
830 TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
831 uint32_t SlotsAvailable;
832 DMA10W_t ServiceComplete;
834 struct sk_buff *pPacket = pMpTcb->Packet;
835 uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
836 struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
838 DBG_TX_ENTER(et131x_dbginfo);
840 ServiceComplete.value =
841 readl(&etdev->regs->txdma.NewServiceComplete.value);
844 * Attempt to fix TWO hardware bugs:
845 * 1) NEVER write an odd number of descriptors.
846 * 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
847 * packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
848 * descriptor IN HALF DUPLEX MODE ONLY
849 * NOTE that (2) interacts with (1). If the packet is less than
850 * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
851 * Therefore if it is even now, it will eventually end up odd, and
852 * so will need adjusting.
854 * VLAN tags get involved since VLAN tags add another one or two
857 DBG_TX(et131x_dbginfo,
858 "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);
860 if ((etdev->duplex_mode == 0)
861 && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
862 DBG_TX(et131x_dbginfo,
863 "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
864 if ((FragListCount & 0x1) == 0) {
865 DBG_TX(et131x_dbginfo,
866 "Even number of descs, split 1st elem\n");
868 /* SegmentSize = pFragList[0].size / 2; */
869 SegmentSize = (pPacket->len - pPacket->data_len) / 2;
871 } else if (FragListCount & 0x1) {
872 DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");
875 /* SegmentSize = pFragList[0].size / 2; */
876 SegmentSize = (pPacket->len - pPacket->data_len) / 2;
879 spin_lock_irqsave(&etdev->SendHWLock, flags);
881 if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
882 ServiceComplete.bits.serv_cpl_wrap) {
883 /* The ring hasn't wrapped. Slots available should be
884 * (RING_SIZE) - the difference between the two pointers.
886 SlotsAvailable = NUM_DESC_PER_RING_TX -
887 (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
888 ServiceComplete.bits.serv_cpl);
890 /* The ring has wrapped. Slots available should be the
891 * difference between the two pointers.
893 SlotsAvailable = ServiceComplete.bits.serv_cpl -
894 etdev->TxRing.txDmaReadyToSend.bits.serv_req;
897 if ((FragListCount + splitfirstelem) > SlotsAvailable) {
898 DBG_WARNING(et131x_dbginfo,
899 "Not Enough Space in Tx Desc Ring\n");
900 spin_unlock_irqrestore(&etdev->SendHWLock, flags);
904 loopEnd = (FragListCount) + splitfirstelem;
907 DBG_TX(et131x_dbginfo,
909 "Packet (SKB) : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
910 "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
912 pPacket, pPacket->len, pPacket->data_len,
913 FragListCount, splitfirstelem, loopEnd);
915 for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
916 if (loopIndex > splitfirstelem)
919 DBG_TX(et131x_dbginfo,
920 "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
923 /* If there is something in this element, let's get a
924 * descriptor from the ring and get the necessary data
926 DBG_TX(et131x_dbginfo,
928 "filling desc entry %d\n",
930 etdev->TxRing.txDmaReadyToSend.bits.serv_req);
933 * NOTE - Should we do a paranoia check here to make sure the fragment
934 * actually has a length? It's HIGHLY unlikely the fragment would
938 /* NOTE - Currently always getting 32-bit addrs, and
939 * dma_addr_t is only 32-bit, so leave "high" ptr
941 * CurDesc.DataBufferPtrHigh = 0;
944 CurDesc.word2.value = 0;
945 CurDesc.word3.value = 0;
947 if (fragIndex == 0) {
948 if (splitfirstelem) {
949 DBG_TX(et131x_dbginfo,
950 "Split first element: YES\n");
952 if (loopIndex == 0) {
953 DBG_TX(et131x_dbginfo,
954 "Got fragment of length %d, fragIndex: %d\n",
958 DBG_TX(et131x_dbginfo,
965 CurDesc.DataBufferPtrLow =
966 pci_map_single(etdev->
972 DBG_TX(et131x_dbginfo,
973 "pci_map_single() returns: 0x%08x\n",
977 DBG_TX(et131x_dbginfo,
978 "Got fragment of length %d, fragIndex: %d\n",
982 DBG_TX(et131x_dbginfo,
983 "Leftover Size: %d\n",
993 CurDesc.DataBufferPtrLow =
994 pci_map_single(etdev->
1005 DBG_TX(et131x_dbginfo,
1006 "pci_map_single() returns: 0x%08x\n",
1011 DBG_TX(et131x_dbginfo,
1012 "Split first element: NO\n");
1014 CurDesc.word2.bits.length_in_bytes =
1015 pPacket->len - pPacket->data_len;
1017 CurDesc.DataBufferPtrLow =
1018 pci_map_single(etdev->pdev,
1023 DBG_TX(et131x_dbginfo,
1024 "pci_map_single() returns: 0x%08x\n",
1025 CurDesc.DataBufferPtrLow);
1029 CurDesc.word2.bits.length_in_bytes =
1030 pFragList[fragIndex - 1].size;
1031 CurDesc.DataBufferPtrLow =
1032 pci_map_page(etdev->pdev,
1033 pFragList[fragIndex - 1].page,
1034 pFragList[fragIndex -
1036 pFragList[fragIndex - 1].size,
1038 DBG_TX(et131x_dbginfo,
1039 "pci_map_page() returns: 0x%08x\n",
1040 CurDesc.DataBufferPtrLow);
1043 if (loopIndex == 0) {
1044 /* This is the first descriptor of the packet
1046 * Set the "f" bit to indicate this is the
1047 * first descriptor in the packet.
1049 DBG_TX(et131x_dbginfo,
1050 "This is our FIRST descriptor\n");
1051 CurDesc.word3.bits.f = 1;
1053 pMpTcb->WrIndexStart =
1054 etdev->TxRing.txDmaReadyToSend;
1057 if ((loopIndex == (loopEnd - 1)) &&
1058 (etdev->duplex_mode ||
1059 (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
1060 /* This is the Last descriptor of the packet */
1061 DBG_TX(et131x_dbginfo,
1062 "THIS is our LAST descriptor\n");
1064 if (etdev->linkspeed ==
1065 TRUEPHY_SPEED_1000MBPS) {
1066 if (++etdev->TxRing.
1067 TxPacketsSinceLastinterrupt >=
1068 PARM_TX_NUM_BUFS_DEF) {
1069 CurDesc.word3.value = 0x5;
1071 TxPacketsSinceLastinterrupt
1074 CurDesc.word3.value = 0x1;
1077 CurDesc.word3.value = 0x5;
1080 /* Following index will be used during freeing
1084 etdev->TxRing.txDmaReadyToSend;
1085 pMpTcb->PacketStaleCount = 0;
1088 /* Copy the descriptor (filled above) into the
1089 * descriptor ring at the next free entry. Advance
1090 * the "next free entry" variable
1092 memcpy(etdev->TxRing.pTxDescRingVa +
1093 etdev->TxRing.txDmaReadyToSend.bits.serv_req,
1094 &CurDesc, sizeof(TX_DESC_ENTRY_t));
1097 etdev->TxRing.pTxDescRingVa +
1098 etdev->TxRing.txDmaReadyToSend.bits.serv_req;
1100 DBG_TX(et131x_dbginfo,
1101 "CURRENT DESCRIPTOR\n"
1102 "\tAddress : 0x%p\n"
1103 "\tDataBufferPtrHigh : 0x%08x\n"
1104 "\tDataBufferPtrLow : 0x%08x\n"
1105 "\tword2 : 0x%08x\n"
1106 "\tword3 : 0x%08x\n",
1108 CurDescPostCopy->DataBufferPtrHigh,
1109 CurDescPostCopy->DataBufferPtrLow,
1110 CurDescPostCopy->word2.value,
1111 CurDescPostCopy->word3.value);
1113 if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
1114 NUM_DESC_PER_RING_TX) {
1115 if (etdev->TxRing.txDmaReadyToSend.bits.
1117 etdev->TxRing.txDmaReadyToSend.
1120 etdev->TxRing.txDmaReadyToSend.
1127 if (etdev->duplex_mode == 0 &&
1128 pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
1129 /* NOTE - Same 32/64-bit issue as above... */
1130 CurDesc.DataBufferPtrHigh = 0x0;
1131 CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
1132 CurDesc.word2.value = 0;
1134 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
1135 if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
1136 PARM_TX_NUM_BUFS_DEF) {
1137 CurDesc.word3.value = 0x5;
1138 etdev->TxRing.TxPacketsSinceLastinterrupt =
1141 CurDesc.word3.value = 0x1;
1144 CurDesc.word3.value = 0x5;
1147 CurDesc.word2.bits.length_in_bytes =
1148 NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;
1150 pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;
1152 memcpy(etdev->TxRing.pTxDescRingVa +
1153 etdev->TxRing.txDmaReadyToSend.bits.serv_req,
1154 &CurDesc, sizeof(TX_DESC_ENTRY_t));
1157 etdev->TxRing.pTxDescRingVa +
1158 etdev->TxRing.txDmaReadyToSend.bits.serv_req;
1160 DBG_TX(et131x_dbginfo,
1161 "CURRENT DESCRIPTOR\n"
1162 "\tAddress : 0x%p\n"
1163 "\tDataBufferPtrHigh : 0x%08x\n"
1164 "\tDataBufferPtrLow : 0x%08x\n"
1165 "\tword2 : 0x%08x\n"
1166 "\tword3 : 0x%08x\n",
1168 CurDescPostCopy->DataBufferPtrHigh,
1169 CurDescPostCopy->DataBufferPtrLow,
1170 CurDescPostCopy->word2.value,
1171 CurDescPostCopy->word3.value);
1173 if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
1174 NUM_DESC_PER_RING_TX) {
1175 if (etdev->TxRing.txDmaReadyToSend.bits.
1177 etdev->TxRing.txDmaReadyToSend.value = 0;
1179 etdev->TxRing.txDmaReadyToSend.value = 0x400;
1183 DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
1184 /* etdev->TxRing.txDmaReadyToSend.value, */
1185 etdev->TxRing.txDmaReadyToSend.bits.serv_req,
1186 NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
1189 spin_lock(&etdev->TCBSendQLock);
1191 if (etdev->TxRing.CurrSendTail)
1192 etdev->TxRing.CurrSendTail->Next = pMpTcb;
1194 etdev->TxRing.CurrSendHead = pMpTcb;
1196 etdev->TxRing.CurrSendTail = pMpTcb;
1198 DBG_ASSERT(pMpTcb->Next == NULL);
1200 etdev->TxRing.nBusySend++;
1202 spin_unlock(&etdev->TCBSendQLock);
1204 /* Write the new write pointer back to the device. */
1205 writel(etdev->TxRing.txDmaReadyToSend.value,
1206 &etdev->regs->txdma.service_request.value);
1208 #ifdef CONFIG_ET131X_DEBUG
1209 DumpDeviceBlock(DBG_TX_ON, etdev, 1);
1212 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
1213 * timer to wake us up if this packet isn't followed by N more.
1215 if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
1216 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1217 &etdev->regs->global.watchdog_timer);
1220 spin_unlock_irqrestore(&etdev->SendHWLock, flags);
1222 DBG_TX_LEAVE(et131x_dbginfo);
1229 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
1230 * @etdev: pointer to our adapter
1231 * @pMpTcb: pointer to MP_TCB
1233 * Assumption - Send spinlock has been acquired
/* NOTE(review): this extract is decimated — the embedded original line
 * numbers show gaps (e.g. 1236-1237, 1241, 1246, 1286, 1289-1291), so
 * elided statements (the second parameter line, else/brace lines, the
 * do { opener, etc.) are not visible here and are not reconstructed.
 */
1235 inline void et131x_free_send_packet(struct et131x_adapter *etdev,
1238 unsigned long flags;
1239 TX_DESC_ENTRY_t *desc = NULL;
1240 struct net_device_stats *stats = &etdev->net_stats;
/* Account the completed frame by destination class: broadcast,
 * multicast, otherwise unicast (the unicast branch's `else` line is
 * among the elided lines).
 */
1242 if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD))
1243 atomic_inc(&etdev->Stats.brdcstxmt)
1244 else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI))
1245 atomic_inc(&etdev->Stats.multixmt);
1247 atomic_inc(&etdev->Stats.unixmt);
/* Only packets that actually carried an skb need byte accounting,
 * descriptor unmapping and skb release. */
1249 if (pMpTcb->Packet) {
1250 stats->tx_bytes += pMpTcb->Packet->len;
1252 /* Iterate through the TX descriptors on the ring
1253 * corresponding to this packet and umap the fragments
1256 DBG_TX(et131x_dbginfo,
1257 "Unmap descriptors Here\n"
1260 "TCB PacketLength : %d\n"
1261 "TCB WrIndex.value : 0x%08x\n"
1262 "TCB WrIndex.bits.val : %d\n"
1263 "TCB WrIndex.value : 0x%08x\n"
1264 "TCB WrIndex.bits.val : %d\n",
1267 pMpTcb->PacketLength,
1268 pMpTcb->WrIndexStart.value,
1269 pMpTcb->WrIndexStart.bits.val,
1270 pMpTcb->WrIndex.value,
1271 pMpTcb->WrIndex.bits.val);
/* Resolve the current descriptor: ring base (virtual address) plus the
 * packet's starting write index. The lvalue being assigned (desc =) is
 * on an elided line. */
1275 (TX_DESC_ENTRY_t *) (etdev->TxRing.
1277 pMpTcb->WrIndexStart.bits.val);
1279 DBG_TX(et131x_dbginfo,
1280 "CURRENT DESCRIPTOR\n"
1281 "\tAddress : 0x%p\n"
1282 "\tDataBufferPtrHigh : 0x%08x\n"
1283 "\tDataBufferPtrLow : 0x%08x\n"
1284 "\tword2 : 0x%08x\n"
1285 "\tword3 : 0x%08x\n",
1287 desc->DataBufferPtrHigh,
1288 desc->DataBufferPtrLow,
/* Release the DMA mapping for this fragment. word2 appears to hold the
 * mapped length — TODO(review) confirm against TX_DESC_ENTRY_t layout.
 * Only the low 32 bits of the bus address are passed; presumably this
 * device/driver only uses 32-bit DMA addresses here — verify. */
1292 pci_unmap_single(etdev->pdev,
1293 desc->DataBufferPtrLow,
1294 desc->word2.value, PCI_DMA_TODEVICE);
/* Advance WrIndexStart with wrap handling: when the 10-bit index runs
 * past the ring size, either clear everything (wrap bit was set) or
 * set 0x400 — which looks like the wrap-flag bit above the 10-bit
 * index field; NOTE(review): confirm against the DMA10W_t definition. */
1296 if (++pMpTcb->WrIndexStart.bits.val >=
1297 NUM_DESC_PER_RING_TX) {
1298 if (pMpTcb->WrIndexStart.bits.wrap)
1299 pMpTcb->WrIndexStart.value = 0;
1301 pMpTcb->WrIndexStart.value = 0x400;
/* Loop until we have walked up to the packet's last descriptor. */
1303 } while (desc != (etdev->TxRing.pTxDescRingVa +
1304 pMpTcb->WrIndex.bits.val));
1306 DBG_TX(et131x_dbginfo,
1307 "Free Packet (SKB) : 0x%p\n", pMpTcb->Packet);
/* Safe from any context (hard irq included). */
1309 dev_kfree_skb_any(pMpTcb->Packet);
/* Scrub the TCB before recycling so stale pointers/flags cannot leak
 * into its next use. */
1312 memset(pMpTcb, 0, sizeof(MP_TCB));
1314 /* Add the TCB to the Ready Q */
1315 spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
1317 etdev->Stats.opackets++;
/* Append to the ready queue tail; if the tail is NULL the queue was
 * empty and the head must be set too. */
1319 if (etdev->TxRing.TCBReadyQueueTail) {
1320 etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
1322 /* Apparently ready Q is empty. */
1323 etdev->TxRing.TCBReadyQueueHead = pMpTcb;
1326 etdev->TxRing.TCBReadyQueueTail = pMpTcb;
1328 spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
/* Sanity: the busy-send count must never have gone negative. */
1330 DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
1334 * et131x_free_busy_send_packets - Free and complete the stopped active sends
1335 * @etdev: pointer to our adapter
1337 * Assumption - Send spinlock has been acquired
/* NOTE(review): decimated extract — embedded line numbers show elided
 * statements (e.g. the PMP_TCB declaration, list_del of `entry`, brace
 * lines). Comments below describe only what the visible lines do.
 */
1339 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
1342 struct list_head *entry;
1343 unsigned long flags;
1344 uint32_t FreeCounter = 0;
1346 DBG_ENTER(et131x_dbginfo);
/* Phase 1: drain the send wait queue. Each pass takes SendWaitLock,
 * decrements the waiting count and (on an elided line, presumably)
 * unlinks the first entry — TODO confirm the elided list_del. */
1348 while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
1349 spin_lock_irqsave(&etdev->SendWaitLock, flags);
1351 etdev->TxRing.nWaitSend--;
1352 spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
1354 entry = etdev->TxRing.SendWaitQueue.next;
/* Queue fully drained; force the counter consistent. */
1357 etdev->TxRing.nWaitSend = 0;
1359 /* Any packets being sent? Check the first TCB on the send list */
1360 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1362 pMpTcb = etdev->TxRing.CurrSendHead;
/* Phase 2: complete every in-flight TCB. FreeCounter caps the loop at
 * NUM_TCB iterations as a runaway guard. */
1364 while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
1365 PMP_TCB pNext = pMpTcb->Next;
1367 etdev->TxRing.CurrSendHead = pNext;
/* Emptied the send list (condition on an elided line, presumably
 * pNext == NULL): clear the tail as well. */
1370 etdev->TxRing.CurrSendTail = NULL;
1372 etdev->TxRing.nBusySend--;
/* Drop the send-queue lock around the recycle call —
 * et131x_free_send_packet takes TCBReadyQLock itself. */
1374 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1376 DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
1379 et131x_free_send_packet(etdev, pMpTcb);
1381 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1383 pMpTcb = etdev->TxRing.CurrSendHead;
/* Hitting the cap means the busy list was longer than NUM_TCB —
 * a corrupt list, hence the error log. */
1386 if (FreeCounter == NUM_TCB) {
1387 DBG_ERROR(et131x_dbginfo,
1388 "MpFreeBusySendPackets exited loop for a bad reason\n");
1392 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
/* Everything reclaimed; reset the busy count. */
1394 etdev->TxRing.nBusySend = 0;
1396 DBG_LEAVE(et131x_dbginfo);
1400 * et131x_handle_send_interrupt - Interrupt handler for sending processing
1401 * @etdev: pointer to our adapter
1403 * Re-claim the send resources, complete sends and get more to send from
1404 * the send wait queue.
1406 * Assumption - Send spinlock has been acquired
/* Thin TX-complete dispatcher: reclaim finished descriptors, then push
 * any queued packets that were waiting for a free TCB. (The function's
 * brace lines are among the elided lines of this extract.) */
1408 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
1410 DBG_TX_ENTER(et131x_dbginfo);
1412 /* Mark as completed any packets which have been sent by the device. */
1413 et131x_update_tcb_list(etdev);
1415 /* If we queued any transmits because we didn't have any TCBs earlier,
1416 * dequeue and send those packets now, as long as we have free TCBs.
1418 et131x_check_send_wait_list(etdev);
1420 DBG_TX_LEAVE(et131x_dbginfo);
1424 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
1425 * @etdev: pointer to our adapter
1427 * Re-claims the send resources and completes sends. Can also be called as
1428 * part of the NIC send routine when the "ServiceComplete" indication has
/* NOTE(review): decimated extract — the `while (pMpTcb &&` headers of
 * both reclaim loops (orig. lines 1446 and 1461) and several brace
 * lines are elided; comments describe only the visible conditions. */
1431 static void et131x_update_tcb_list(struct et131x_adapter *etdev)
1433 unsigned long flags;
1434 DMA10W_t ServiceComplete;
/* Snapshot the hardware's completion index (10-bit value + wrap bit). */
1437 ServiceComplete.value =
1438 readl(&etdev->regs->txdma.NewServiceComplete.value);
1440 /* Has the ring wrapped? Process any descriptors that do not have
1441 * the same "wrap" indicator as the current completion indicator
1443 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1445 pMpTcb = etdev->TxRing.CurrSendHead;
/* Loop 1: completion index is on a different wrap generation than the
 * TCB — its descriptors are done while the raw index is still smaller. */
1447 ServiceComplete.bits.wrap != pMpTcb->WrIndex.bits.wrap &&
1448 ServiceComplete.bits.val < pMpTcb->WrIndex.bits.val) {
1449 etdev->TxRing.nBusySend--;
1450 etdev->TxRing.CurrSendHead = pMpTcb->Next;
1451 if (pMpTcb->Next == NULL)
1452 etdev->TxRing.CurrSendTail = NULL;
/* Recycle outside the lock — et131x_free_send_packet takes its own
 * TCBReadyQLock. */
1454 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1455 et131x_free_send_packet(etdev, pMpTcb);
1456 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1458 /* Goto the next packet */
1459 pMpTcb = etdev->TxRing.CurrSendHead;
/* Loop 2: same wrap generation — a TCB is complete once the hardware
 * index has moved strictly past its write index. */
1462 ServiceComplete.bits.wrap == pMpTcb->WrIndex.bits.wrap &&
1463 ServiceComplete.bits.val > pMpTcb->WrIndex.bits.val) {
1464 etdev->TxRing.nBusySend--;
1465 etdev->TxRing.CurrSendHead = pMpTcb->Next;
1466 if (pMpTcb->Next == NULL)
1467 etdev->TxRing.CurrSendTail = NULL;
1469 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1470 et131x_free_send_packet(etdev, pMpTcb);
1471 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1473 /* Goto the next packet */
1474 pMpTcb = etdev->TxRing.CurrSendHead;
1477 /* Wake up the queue when we hit a low-water mark */
1478 if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
1479 netif_wake_queue(etdev->netdev);
1481 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1485 * et131x_check_send_wait_list - Helper routine for the interrupt handler
1486 * @etdev: pointer to our adapter
1488 * Takes packets from the send wait queue and posts them to the device (if
1491 static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
1493 unsigned long flags;
1495 spin_lock_irqsave(&etdev->SendWaitLock, flags);
1497 while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
1498 MP_TCB_RESOURCES_AVAILABLE(etdev)) {
1499 struct list_head *entry;
1501 DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");
1503 entry = etdev->TxRing.SendWaitQueue.next;
1505 etdev->TxRing.nWaitSend--;
1507 DBG_WARNING(et131x_dbginfo,
1508 "MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n",
1509 etdev->TxRing.nWaitSend);
1512 spin_unlock_irqrestore(&etdev->SendWaitLock, flags);