/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   struct tcb *tcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb);
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->tx_ring.tcb_ring = (struct tcb *)
		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
	if (!adapter->tx_ring.tcb_ring) {
		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->tx_desc_ring =
	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->tx_desc_ring_pa);
	if (!adapter->tx_ring.tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
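	/* The Tx status block is the single word that the device DMA-writes
	 * its completion state back to (see the dma_wb_base_{hi,lo} setup in
	 * ConfigTxDmaRegs() below); a one-word allocation of sizeof(u32) is
	 * assumed here.
	 */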
	/* Allocate memory for the Tx status block */
	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
						  sizeof(u32),
						  &tx_ring->tx_status_pa);
	if (!adapter->tx_ring.tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	if (adapter->tx_ring.tx_desc_ring) {
		/* Free memory relating to Tx rings here */
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
							+ 4096 - 1;
		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->tx_ring.tx_desc_ring,
				    adapter->tx_ring.tx_desc_ring_pa);
		adapter->tx_ring.tx_desc_ring = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->tx_ring.tx_status) {
		pci_free_consistent(adapter->pdev,
				    sizeof(u32),
				    adapter->tx_ring.tx_status,
				    adapter->tx_ring.tx_status_pa);
		adapter->tx_ring.tx_status = NULL;
	}

	/* Free the memory for the tcb structures */
	kfree(adapter->tx_ring.tcb_ring);
}
/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)etdev->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) etdev->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

	/* Load the completion writeback physical address */
	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	*etdev->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	etdev->tx_ring.send_idx = 0;
}
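/* NOTE: tx_ring.send_idx and the tcb index fields use a 10-bit ring index
 * in the low bits (extracted with INDEX10()/ET_DMA10_MASK) plus a wrap bit
 * (ET_DMA10_WRAP) that is toggled each time the index passes the end of the
 * ring; add_10bit() advances an index within this encoding. Comparing wrap
 * bits tells the send and completion paths which pass over the ring a
 * descriptor belongs to.
 */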
/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
	       &etdev->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &etdev->regs->txdma.csr);
}
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain. If this is the last TCB in the chain, also set the
		 * tail pointer.
		 */
		tcb->next = ct < NUM_TCB ? tcb + 1 : NULL;
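	/* The loop leaves tcb pointing one element past the end of the
	 * array, so the ready list now looks like:
	 *
	 *   tcb_qhead -> tcb[0] -> tcb[1] -> ... -> tcb[NUM_TCB - 1] -> NULL
	 *
	 * and the tail must point at the last real element, tcb - 1.
	 */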
	tx_ring->tcb_qtail = tcb - 1;

	/* Current send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its array make no sense here
	 */

	/* TCB is not available */
	if (etdev->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				etdev->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}
/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	tcb = etdev->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return -ENOMEM;
	}

	etdev->tx_ring.tcb_qhead = tcb->next;

	if (etdev->tx_ring.tcb_qhead == NULL)
		etdev->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	tcb->skb = skb;
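	/* skb->data begins with the 14-byte Ethernet header, so the first
	 * three u16 words examined below are the 6-byte destination MAC
	 * address: all-ones is broadcast, and an address with the group bit
	 * set is counted as multicast.
	 */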
	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;
	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, tcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->tx_ring.tcb_qtail) {
			etdev->tx_ring.tcb_qtail->next = tcb;
		} else {
			/* Apparently ready Q is empty. */
			etdev->tx_ring.tcb_qhead = tcb;
		}

		etdev->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
		return status;
	}
	WARN_ON(etdev->tx_ring.used > NUM_TCB);
	return 0;
}
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function can handle any number of
	 * fragments. If needed, we can call it, although it is less
	 * efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
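	/* Each struct tx_desc is 16 bytes: a 64-bit buffer address split
	 * across addr_hi/addr_lo, a len_vlan word whose low 16 bits are the
	 * buffer length (the VLAN bits are unused here), and a flags word
	 * whose first/last/interrupt bits are set further below.
	 */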
	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data,
						       skb->len -
						       skb->data_len,
						       PCI_DMA_TODEVICE);
			} else {
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(etdev->pdev,
						       skb->data +
						       (skb->len -
							skb->data_len) / 2,
						       (skb->len -
							skb->data_len) / 2,
						       PCI_DMA_TODEVICE);
			}
		} else {
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
					frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
				pci_map_page(etdev->pdev,
					     frags[i - 1].page,
					     frags[i - 1].page_offset,
					     frags[i - 1].size,
					     PCI_DMA_TODEVICE);
		}
	}

	if (frag == 0)
		return -EIO;
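	/* Descriptor flag bits: bit 0 marks the last element of a packet,
	 * bit 1 the first element, and bit 2 requests an interrupt on
	 * completion; 0x5 therefore means "last element, interrupt".
	 */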
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->tx_ring.since_irq ==
		    PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			etdev->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else {
		desc[frag - 1].flags = 0x5;
	}

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = etdev->tx_ring.send_idx;
	tcb->stale = 0;

	spin_lock_irqsave(&etdev->SendHWLock, flags);
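	/* Copy the descriptors into the ring in at most two chunks: one from
	 * send_idx up to the end of the ring and, if the packet wraps, a
	 * second chunk starting again at ring entry 0.
	 */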
	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(etdev->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(etdev->tx_ring.tx_desc_ring +
	       INDEX10(etdev->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&etdev->tx_ring.send_idx, thiscopy);

	if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
	    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(etdev->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&etdev->tx_ring.send_idx, remainder);
	}
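	/* Record the ring position of this packet's last descriptor so that
	 * the completion path knows how far to unmap; account for a wrap
	 * that may just have occurred.
	 */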
	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
		if (etdev->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP |
				     (NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = etdev->tx_ring.send_idx - 1;
	spin_lock(&etdev->TCBSendQLock);

	if (etdev->tx_ring.send_tail)
		etdev->tx_ring.send_tail->next = tcb;
	else
		etdev->tx_ring.send_head = tcb;

	etdev->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	etdev->tx_ring.used++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->tx_ring.send_idx,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	return 0;
}
/**
 * et131x_free_send_packet - Recycle a struct tcb
 * @etdev: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;
		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to.
		 */
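		/* Walk from index_start up to and including the packet's
		 * last descriptor at tcb->index, using the same 10-bit
		 * index-plus-wrap-bit arithmetic as the send path.
		 */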
		do {
			desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
						  INDEX10(tcb->index_start));

			pci_unmap_single(etdev->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->tx_ring.tx_desc_ring +
				  INDEX10(tcb->index)));
		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->tx_ring.tcb_qtail)
		etdev->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		etdev->tx_ring.tcb_qhead = tcb;

	etdev->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
	WARN_ON(etdev->tx_ring.used < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while ((tcb != NULL) && (freed < NUM_TCB)) {
		struct tcb *next = tcb->next;

		etdev->tx_ring.send_head = next;

		if (next == NULL)
			etdev->tx_ring.send_tail = NULL;

		etdev->tx_ring.used--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		freed++;
		et131x_free_send_packet(etdev, tcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		tcb = etdev->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->tx_ring.used = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	tcb = etdev->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
	}
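	/* Once the wrap indicators match, any remaining completed TCBs sit
	 * below the completion index within the same pass over the ring.
	 */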
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index > (tcb->index & ET_DMA10_MASK)) {
		etdev->tx_ring.used--;
		etdev->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			etdev->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, tcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		tcb = etdev->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->tx_ring.used <= (NUM_TCB / 3))
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}