Staging: et131x: MPSend macros
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"

/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);

/**
 * et131x_tx_dma_memory_alloc - Allocate Tx descriptor ring and TCB memory
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	TX_RING_t *tx_ring = &adapter->TxRing;

	DBG_ENTER(et131x_dbginfo);

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	tx_ring->MpTcbMem = kcalloc(NUM_TCB, sizeof(MP_TCB),
				    GFP_ATOMIC | GFP_DMA);
	if (!tx_ring->MpTcbMem) {
		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->pTxDescRingVa =
	    (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->pTxDescRingPa);
	if (!tx_ring->pTxDescRingVa) {
		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

	/* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
	et131x_align_allocated_memory(adapter,
				      &tx_ring->pTxDescRingAdjustedPa,
				      &tx_ring->TxDescOffset, 0x0FFF);

	tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;

	/* Allocate memory for the Tx status block.
	 *
	 * NOTE: test the returned virtual address; the physical address is
	 * only meaningful when the allocation succeeded.
	 */
	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(TX_STATUS_BLOCK_t),
						    &tx_ring->pTxStatusPa);
	if (!tx_ring->pTxStatusVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Tx status block\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Allocate memory for a dummy buffer */
	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
						      NIC_MIN_PACKET_SIZE,
						      &tx_ring->pTxDummyBlkPa);
	if (!tx_ring->pTxDummyBlkVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Tx dummy buffer\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	DBG_LEAVE(et131x_dbginfo);
	return 0;
}
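/*
 * A note on the alignment above: et131x_align_allocated_memory() (defined
 * in et131x_initpci.c) is assumed to round the physical address up to the
 * next 4 KiB boundary and report how far it moved it, roughly:
 *
 *	offset = (0x1000 - (addr & 0x0FFF)) & 0x0FFF;
 *	addr += offset;
 *
 * The same offset is then added to pTxDescRingVa so the virtual and
 * physical views of the ring stay in step; the "+ 4096 - 1" slack in
 * desc_size guarantees the shifted ring still fits the allocation.
 */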

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	DBG_ENTER(et131x_dbginfo);

	if (adapter->TxRing.pTxDescRingVa) {
		/* Free memory relating to Tx rings here */
		adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

		desc_size =
		    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->TxRing.pTxDescRingVa,
				    adapter->TxRing.pTxDescRingPa);

		adapter->TxRing.pTxDescRingVa = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->TxRing.pTxStatusVa) {
		pci_free_consistent(adapter->pdev,
				    sizeof(TX_STATUS_BLOCK_t),
				    adapter->TxRing.pTxStatusVa,
				    adapter->TxRing.pTxStatusPa);

		adapter->TxRing.pTxStatusVa = NULL;
	}

	/* Free memory for the dummy buffer */
	if (adapter->TxRing.pTxDummyBlkVa) {
		pci_free_consistent(adapter->pdev,
				    NIC_MIN_PACKET_SIZE,
				    adapter->TxRing.pTxDummyBlkVa,
				    adapter->TxRing.pTxDummyBlkPa);

		adapter->TxRing.pTxDummyBlkVa = NULL;
	}

	/* Free the memory for the MP_TCB structures; NULL the pointer so a
	 * repeated call is harmless, matching the checks above.
	 */
	kfree(adapter->TxRing.MpTcbMem);
	adapter->TxRing.MpTcbMem = NULL;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct _TXDMA_t __iomem *pTxDma = &etdev->CSRAddress->txdma;

	DBG_ENTER(et131x_dbginfo);

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
	       &pTxDma->pr_base_hi);
	writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
	       &pTxDma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &pTxDma->pr_num_des.value);

	/* Load the completion writeback physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	writel(0, &pTxDma->dma_wb_base_hi);
	writel(etdev->TxRing.pTxStatusPa, &pTxDma->dma_wb_base_lo);

	memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

	writel(0, &pTxDma->service_request.value);
	etdev->TxRing.txDmaReadyToSend.value = 0;

	DBG_LEAVE(et131x_dbginfo);
}
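/*
 * A note on the writeback setup above: judging by the register names,
 * dma_wb_base_hi/lo give the engine the physical address of the
 * TX_STATUS_BLOCK_t, into which it DMA-writes its completion index as it
 * services descriptors. The completion path can then poll the status block
 * in memory instead of reading a CSR. Zeroing service_request and
 * txDmaReadyToSend puts the hardware and software send indices back at
 * descriptor zero.
 */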

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	DBG_ENTER(et131x_dbginfo);

	/* Set up the transmit dma configuration register */
	writel(0x101, &etdev->CSRAddress->txdma.csr.value);

	DBG_LEAVE(et131x_dbginfo);
}
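/*
 * The magic 0x101 written above presumably corresponds to TXDMA_CSR_t with
 * the halt and sngl_epkt_mode bits set; that reading is consistent with
 * et131x_tx_dma_enable() below, which clears halt and sets sngl_epkt_mode
 * for normal operation.
 */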

/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	DBG_ENTER(et131x_dbginfo);

	if (etdev->RegistryPhyLoopbk) {
		/* TxDMA is disabled for loopback operation. */
		writel(0x101, &etdev->CSRAddress->txdma.csr.value);
	} else {
		TXDMA_CSR_t csr = { 0 };

		/* Setup the transmit dma configuration register for normal
		 * operation
		 */
		csr.bits.sngl_epkt_mode = 1;
		csr.bits.halt = 0;
		csr.bits.cache_thrshld = etdev->RegistryDMACache;
		writel(csr.value, &etdev->CSRAddress->txdma.csr.value);
	}

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	PMP_TCB pMpTcb;
	uint32_t TcbCount;
	TX_RING_t *tx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	tx_ring = &adapter->TxRing;
	pMpTcb = adapter->TxRing.MpTcbMem;

	tx_ring->TCBReadyQueueHead = pMpTcb;

	/* Go through and set up each TCB */
	for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
		memset(pMpTcb, 0, sizeof(MP_TCB));

		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain.  If this is the last TCB in the chain, also set the
		 * tail pointer.
		 */
		if (TcbCount < NUM_TCB - 1) {
			pMpTcb->Next = pMpTcb + 1;
		} else {
			tx_ring->TCBReadyQueueTail = pMpTcb;
			pMpTcb->Next = (PMP_TCB) NULL;
		}

		pMpTcb++;
	}

	/* Curr send queue should now be empty */
	tx_ring->CurrSendHead = (PMP_TCB) NULL;
	tx_ring->CurrSendTail = (PMP_TCB) NULL;

	INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);

	DBG_LEAVE(et131x_dbginfo);
}
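/*
 * After et131x_init_send() the TCB free list looks like this:
 *
 *   TCBReadyQueueHead -> TCB[0] -> TCB[1] -> ... -> TCB[NUM_TCB-1] -> NULL
 *                                                       ^
 *                                                TCBReadyQueueTail
 *
 * et131x_send_packet() below pops TCBs from the head of this list, and the
 * free/completion path pushes recycled TCBs back on the tail.
 */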

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Returns 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	DBG_TX_ENTER(et131x_dbginfo);

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and its associated array make no sense
	 * here
	 */

	/* Queue is not empty or TCB is not available */
	if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
	    MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
		    || !netif_carrier_ok(netdev)) {
			DBG_VERBOSE(et131x_dbginfo,
				"Can't Tx, Link is DOWN; drop the packet\n");

			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);

			if (status == -ENOMEM) {
				/* NOTE: If there's an error on send, no need
				 * to queue the packet under Linux; if we just
				 * send an error up to the netif layer, it
				 * will resend the skb to us.
				 */
				DBG_WARNING(et131x_dbginfo,
					    "Resources problem, Queue tx packet\n");
			} else if (status != 0) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				DBG_WARNING(et131x_dbginfo,
					    "General error, drop packet\n");

				dev_kfree_skb_any(skb);
				skb = NULL;

				etdev->net_stats.tx_dropped++;
			}
		}
	}

	DBG_TX_LEAVE(et131x_dbginfo);
	return status;
}
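/*
 * NOTE: in the 2.6-era model this driver targets, the hard_start_xmit glue
 * (assumed to live in et131x_netdev.c) is expected to translate the -ENOMEM
 * above into NETDEV_TX_BUSY so the stack holds on to the skb and retries,
 * which is why no queueing is done here.
 */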

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Returns 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status = 0;
	PMP_TCB pMpTcb = NULL;
	uint16_t *pShBufVa;
	unsigned long flags;

	DBG_TX_ENTER(et131x_dbginfo);

	/* Is our buffer scattered, or contiguous? */
	if (skb_shinfo(skb)->nr_frags == 0) {
		DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
	} else {
		DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
		       skb_shinfo(skb)->nr_frags);
	}

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN) {
		DBG_ERROR(et131x_dbginfo,
			  "Packet size < ETH_HLEN (14 bytes)\n");
		DBG_LEAVE(et131x_dbginfo);
		return -EIO;
	}

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	pMpTcb = etdev->TxRing.TCBReadyQueueHead;

	if (pMpTcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

		DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
		DBG_TX_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

	if (etdev->TxRing.TCBReadyQueueHead == NULL)
		etdev->TxRing.TCBReadyQueueTail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	pMpTcb->PacketLength = skb->len;
	pMpTcb->Packet = skb;

	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
		pShBufVa = (uint16_t *) skb->data;

		if ((pShBufVa[0] == 0xffff) &&
		    (pShBufVa[1] == 0xffff) && (pShBufVa[2] == 0xffff)) {
			MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
		} else if ((pShBufVa[0] & 0x3) == 0x0001) {
			MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
		}
	}
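	/* The block above reads the first six bytes of the frame (the
	 * destination MAC) as three 16-bit words: all ones means broadcast.
	 * The multicast test presumably intends the group bit (bit 0 of the
	 * first octet, i.e. bit 0 of pShBufVa[0] on little-endian); note the
	 * 0x3 mask also requires the locally-administered bit to be clear,
	 * so only addresses with bit 0 set and bit 1 clear count as
	 * multicast here.
	 */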

	pMpTcb->Next = NULL;

	/* Call the NIC specific send handler. */
	if (status == 0)
		status = nic_send_packet(etdev, pMpTcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->TxRing.TCBReadyQueueTail) {
			etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
		} else {
			/* Apparently ready Q is empty. */
			etdev->TxRing.TCBReadyQueueHead = pMpTcb;
		}

		etdev->TxRing.TCBReadyQueueTail = pMpTcb;

		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

		DBG_TX_LEAVE(et131x_dbginfo);
		return status;
	}

	DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
	uint32_t loopIndex;
	TX_DESC_ENTRY_t CurDesc[24];
	uint32_t FragmentNumber = 0;
	uint32_t iThisCopy, iRemainder;
	struct sk_buff *pPacket = pMpTcb->Packet;
	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
	unsigned long flags;

	DBG_TX_ENTER(et131x_dbginfo);

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass.  In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (FragListCount > 23) {
		DBG_TX_LEAVE(et131x_dbginfo);
		return -EIO;
	}
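	/* Why 23: CurDesc[] holds 24 entries and, in the loop below, the
	 * first fragment (the skb linear area) may be split across two
	 * descriptors, so a packet can consume up to FragListCount + 1
	 * entries. FragListCount <= 23 keeps the worst case within the
	 * array, matching the memset() size below.
	 */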

	memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));

	for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (loopIndex == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((pPacket->len - pPacket->data_len) <= 1514) {
				DBG_TX(et131x_dbginfo,
				       "Got packet of length %d, "
				       "filling desc entry %d, "
				       "TCB: 0x%p\n",
				       (pPacket->len - pPacket->data_len),
				       etdev->TxRing.txDmaReadyToSend.bits.val,
				       pMpTcb);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				CurDesc[FragmentNumber].word2.bits.length_in_bytes =
				    pPacket->len - pPacket->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   pPacket->len -
						   pPacket->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				DBG_TX(et131x_dbginfo,
				       "Got packet of length %d, "
				       "filling desc entry %d, "
				       "TCB: 0x%p\n",
				       (pPacket->len - pPacket->data_len),
				       etdev->TxRing.txDmaReadyToSend.bits.val,
				       pMpTcb);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				CurDesc[FragmentNumber].word2.bits.length_in_bytes =
				    ((pPacket->len - pPacket->data_len) / 2);

				/* See the 32/64-bit NOTE above */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				/* The second half also carries the odd byte,
				 * if the linear area length is odd.
				 */
				CurDesc[FragmentNumber].word2.bits.length_in_bytes =
				    (pPacket->len - pPacket->data_len) -
				    ((pPacket->len - pPacket->data_len) / 2);

				/* See the 32/64-bit NOTE above */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data +
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   (pPacket->len -
						    pPacket->data_len) -
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);
			}
		} else {
			DBG_TX(et131x_dbginfo,
			       "Got fragment of length %d, "
			       "filling desc entry %d, "
			       "TCB: 0x%p\n",
			       pFragList[loopIndex - 1].size,
			       etdev->TxRing.txDmaReadyToSend.bits.val,
			       pMpTcb);

			CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

			CurDesc[FragmentNumber].word2.bits.length_in_bytes =
			    pFragList[loopIndex - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a uint32_t.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			CurDesc[FragmentNumber++].DataBufferPtrLow =
			    pci_map_page(etdev->pdev,
					 pFragList[loopIndex - 1].page,
					 pFragList[loopIndex - 1].page_offset,
					 pFragList[loopIndex - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (FragmentNumber == 0) {
		DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
		return -EIO;
	}

	if (etdev->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
		    etdev->RegistryTxNumBuffers) {
			CurDesc[FragmentNumber - 1].word3.value = 0x5;
			etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
		} else {
			CurDesc[FragmentNumber - 1].word3.value = 0x1;
		}
	} else {
		CurDesc[FragmentNumber - 1].word3.value = 0x5;
	}
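	/* The word3 values above are inferred from this coalescing logic
	 * rather than from a register spec: 0x1 appears to mark "last
	 * descriptor of the packet", and 0x5 additionally requests a Tx
	 * completion interrupt. At gigabit an interrupt is requested only
	 * every RegistryTxNumBuffers packets; below gigabit, every packet
	 * requests one.
	 */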

	CurDesc[0].word3.bits.f = 1;

	pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
	pMpTcb->PacketStaleCount = 0;

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	iThisCopy =
	    NUM_DESC_PER_RING_TX - etdev->TxRing.txDmaReadyToSend.bits.val;

	if (iThisCopy >= FragmentNumber) {
		iRemainder = 0;
		iThisCopy = FragmentNumber;
	} else {
		iRemainder = FragmentNumber - iThisCopy;
	}

	memcpy(etdev->TxRing.pTxDescRingVa +
	       etdev->TxRing.txDmaReadyToSend.bits.val, CurDesc,
	       sizeof(TX_DESC_ENTRY_t) * iThisCopy);

	etdev->TxRing.txDmaReadyToSend.bits.val += iThisCopy;

	if ((etdev->TxRing.txDmaReadyToSend.bits.val == 0) ||
	    (etdev->TxRing.txDmaReadyToSend.bits.val ==
	     NUM_DESC_PER_RING_TX)) {
		if (etdev->TxRing.txDmaReadyToSend.bits.wrap)
			etdev->TxRing.txDmaReadyToSend.value = 0;
		else
			etdev->TxRing.txDmaReadyToSend.value = 0x400;
	}
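	/* txDmaReadyToSend encoding, as used here: bits.val is the
	 * descriptor index (low 10 bits) and 0x400 appears to be a wrap
	 * bit that toggles each time the index passes the end of the ring,
	 * presumably letting the hardware distinguish a full ring from an
	 * empty one when the indices are equal.
	 */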

	if (iRemainder) {
		memcpy(etdev->TxRing.pTxDescRingVa,
		       CurDesc + iThisCopy,
		       sizeof(TX_DESC_ENTRY_t) * iRemainder);

		etdev->TxRing.txDmaReadyToSend.bits.val += iRemainder;
	}

	if (etdev->TxRing.txDmaReadyToSend.bits.val == 0) {
		if (etdev->TxRing.txDmaReadyToSend.value)
			pMpTcb->WrIndex.value = NUM_DESC_PER_RING_TX - 1;
		else
			pMpTcb->WrIndex.value =
			    0x400 | (NUM_DESC_PER_RING_TX - 1);
	} else
		pMpTcb->WrIndex.value =
		    etdev->TxRing.txDmaReadyToSend.value - 1;
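	/* WrIndex records the index (and wrap state) of the last descriptor
	 * this packet occupies; the cases above step back one slot from
	 * txDmaReadyToSend while undoing the wrap adjustment. The free path
	 * below walks from WrIndexStart toward WrIndex to unmap fragments.
	 */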

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->TxRing.CurrSendTail)
		etdev->TxRing.CurrSendTail->Next = pMpTcb;
	else
		etdev->TxRing.CurrSendHead = pMpTcb;

	etdev->TxRing.CurrSendTail = pMpTcb;

	DBG_ASSERT(pMpTcb->Next == NULL);

	etdev->TxRing.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->TxRing.txDmaReadyToSend.value,
	       &etdev->CSRAddress->txdma.service_request.value);

	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
		writel(etdev->RegistryTxTimeInterval * NANO_IN_A_MICRO,
		       &etdev->CSRAddress->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

/*
 * NOTE: For now, keep this older version of NICSendPacket around for
 * reference, even though it's not used
 */
#if 0

/**
 * NICSendPacket - NIC specific send handler.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 on success, errno on failure.
 *
 * This version of the send routine is designed for version A silicon.
 * Assumption - Send spinlock has been acquired.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
	uint32_t loopIndex, fragIndex, loopEnd;
	uint32_t iSplitFirstElement = 0;
	uint32_t SegmentSize = 0;
	TX_DESC_ENTRY_t CurDesc;
	TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
	uint32_t SlotsAvailable;
	DMA10W_t ServiceComplete;
	unsigned long flags;
	struct sk_buff *pPacket = pMpTcb->Packet;
	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];

	DBG_TX_ENTER(et131x_dbginfo);

	ServiceComplete.value =
		readl(&etdev->CSRAddress->txdma.NewServiceComplete.value);

	/*
	 * Attempt to fix TWO hardware bugs:
	 * 1)  NEVER write an odd number of descriptors.
	 * 2)  If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
	 *     packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
	 *     descriptor IN HALF DUPLEX MODE ONLY
	 * NOTE that (2) interacts with (1).  If the packet is less than
	 * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
	 * Therefore if it is even now, it will eventually end up odd, and
	 * so will need adjusting.
	 *
	 * VLAN tags get involved since VLAN tags add another one or two
	 * segments.
	 */
	DBG_TX(et131x_dbginfo,
	       "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);

	if ((etdev->uiDuplexMode == 0)
	    && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
		DBG_TX(et131x_dbginfo,
		       "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
		if ((FragListCount & 0x1) == 0) {
			DBG_TX(et131x_dbginfo,
			       "Even number of descs, split 1st elem\n");
			iSplitFirstElement = 1;
			/* SegmentSize = pFragList[0].size / 2; */
			SegmentSize = (pPacket->len - pPacket->data_len) / 2;
		}
	} else if (FragListCount & 0x1) {
		DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");

		iSplitFirstElement = 1;
		/* SegmentSize = pFragList[0].size / 2; */
		SegmentSize = (pPacket->len - pPacket->data_len) / 2;
	}

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
	    ServiceComplete.bits.serv_cpl_wrap) {
		/* The ring hasn't wrapped.  Slots available should be
		 * (RING_SIZE) - the difference between the two pointers.
		 */
		SlotsAvailable = NUM_DESC_PER_RING_TX -
		    (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
		     ServiceComplete.bits.serv_cpl);
	} else {
		/* The ring has wrapped.  Slots available should be the
		 * difference between the two pointers.
		 */
		SlotsAvailable = ServiceComplete.bits.serv_cpl -
		    etdev->TxRing.txDmaReadyToSend.bits.serv_req;
	}

	if ((FragListCount + iSplitFirstElement) > SlotsAvailable) {
		DBG_WARNING(et131x_dbginfo,
			    "Not Enough Space in Tx Desc Ring\n");
		spin_unlock_irqrestore(&etdev->SendHWLock, flags);
		return -ENOMEM;
	}

	loopEnd = FragListCount + iSplitFirstElement;
	fragIndex = 0;

	DBG_TX(et131x_dbginfo,
	       "TCB           : 0x%p\n"
	       "Packet (SKB)  : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
	       "FragListCount : %d\t iSplitFirstElement: %d\t loopEnd:%d\n",
	       pMpTcb,
	       pPacket, pPacket->len, pPacket->data_len,
	       FragListCount, iSplitFirstElement, loopEnd);

	for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
		if (loopIndex > iSplitFirstElement)
			fragIndex++;

		DBG_TX(et131x_dbginfo,
		       "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
		       fragIndex);

		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		DBG_TX(et131x_dbginfo,
		       "Packet Length %d, filling desc entry %d\n",
		       pPacket->len,
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req);

		/*
		 * NOTE - Should we do a paranoia check here to make sure the
		 * fragment actually has a length? It's HIGHLY unlikely the
		 * fragment would contain no data...
		 */
		if (1) {
			/* NOTE - Currently always getting 32-bit addrs, and
			 * dma_addr_t is only 32-bit, so leave "high" ptr
			 * value out for now
			 * CurDesc.DataBufferPtrHigh = 0;
			 */

			CurDesc.word2.value = 0;
			CurDesc.word3.value = 0;

			if (fragIndex == 0) {
				if (iSplitFirstElement) {
					DBG_TX(et131x_dbginfo,
					       "Split first element: YES\n");

					if (loopIndex == 0) {
						DBG_TX(et131x_dbginfo,
						       "Got fragment of length %d, fragIndex: %d\n",
						       pPacket->len -
						       pPacket->data_len,
						       fragIndex);
						DBG_TX(et131x_dbginfo,
						       "SegmentSize: %d\n",
						       SegmentSize);

						CurDesc.word2.bits.length_in_bytes =
						    SegmentSize;
						CurDesc.DataBufferPtrLow =
						    pci_map_single(etdev->pdev,
								   pPacket->data,
								   SegmentSize,
								   PCI_DMA_TODEVICE);
						DBG_TX(et131x_dbginfo,
						       "pci_map_single() returns: 0x%08x\n",
						       CurDesc.DataBufferPtrLow);
					} else {
						DBG_TX(et131x_dbginfo,
						       "Got fragment of length %d, fragIndex: %d\n",
						       pPacket->len -
						       pPacket->data_len,
						       fragIndex);
						DBG_TX(et131x_dbginfo,
						       "Leftover Size: %d\n",
						       (pPacket->len -
							pPacket->data_len -
							SegmentSize));

						CurDesc.word2.bits.length_in_bytes =
						    ((pPacket->len -
						      pPacket->data_len) -
						     SegmentSize);
						CurDesc.DataBufferPtrLow =
						    pci_map_single(etdev->pdev,
								   (pPacket->data +
								    SegmentSize),
								   (pPacket->len -
								    pPacket->data_len -
								    SegmentSize),
								   PCI_DMA_TODEVICE);
						DBG_TX(et131x_dbginfo,
						       "pci_map_single() returns: 0x%08x\n",
						       CurDesc.DataBufferPtrLow);
					}
				} else {
					DBG_TX(et131x_dbginfo,
					       "Split first element: NO\n");

					CurDesc.word2.bits.length_in_bytes =
					    pPacket->len - pPacket->data_len;

					CurDesc.DataBufferPtrLow =
					    pci_map_single(etdev->pdev,
							   pPacket->data,
							   (pPacket->len -
							    pPacket->data_len),
							   PCI_DMA_TODEVICE);
					DBG_TX(et131x_dbginfo,
					       "pci_map_single() returns: 0x%08x\n",
					       CurDesc.DataBufferPtrLow);
				}
			} else {
				CurDesc.word2.bits.length_in_bytes =
				    pFragList[fragIndex - 1].size;
				CurDesc.DataBufferPtrLow =
				    pci_map_page(etdev->pdev,
						 pFragList[fragIndex - 1].page,
						 pFragList[fragIndex - 1].page_offset,
						 pFragList[fragIndex - 1].size,
						 PCI_DMA_TODEVICE);
				DBG_TX(et131x_dbginfo,
				       "pci_map_page() returns: 0x%08x\n",
				       CurDesc.DataBufferPtrLow);
			}

			if (loopIndex == 0) {
				/* This is the first descriptor of the packet
				 *
				 * Set the "f" bit to indicate this is the
				 * first descriptor in the packet.
				 */
				DBG_TX(et131x_dbginfo,
				       "This is our FIRST descriptor\n");
				CurDesc.word3.bits.f = 1;

				pMpTcb->WrIndexStart =
				    etdev->TxRing.txDmaReadyToSend;
			}

			if ((loopIndex == (loopEnd - 1)) &&
			    (etdev->uiDuplexMode ||
			     (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
				/* This is the Last descriptor of the packet */
				DBG_TX(et131x_dbginfo,
				       "THIS is our LAST descriptor\n");

				if (etdev->uiLinkSpeed ==
				    TRUEPHY_SPEED_1000MBPS) {
					if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
					    etdev->RegistryTxNumBuffers) {
						CurDesc.word3.value = 0x5;
						etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
					} else {
						CurDesc.word3.value = 0x1;
					}
				} else {
					CurDesc.word3.value = 0x5;
				}

				/* Following index will be used during freeing
				 * of packet
				 */
				pMpTcb->WrIndex =
				    etdev->TxRing.txDmaReadyToSend;
				pMpTcb->PacketStaleCount = 0;
			}

			/* Copy the descriptor (filled above) into the
			 * descriptor ring at the next free entry.  Advance
			 * the "next free entry" variable
			 */
			memcpy(etdev->TxRing.pTxDescRingVa +
			       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
			       &CurDesc, sizeof(TX_DESC_ENTRY_t));

			CurDescPostCopy =
			    etdev->TxRing.pTxDescRingVa +
			    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

			DBG_TX(et131x_dbginfo,
			       "CURRENT DESCRIPTOR\n"
			       "\tAddress           : 0x%p\n"
			       "\tDataBufferPtrHigh : 0x%08x\n"
			       "\tDataBufferPtrLow  : 0x%08x\n"
			       "\tword2             : 0x%08x\n"
			       "\tword3             : 0x%08x\n",
			       CurDescPostCopy,
			       CurDescPostCopy->DataBufferPtrHigh,
			       CurDescPostCopy->DataBufferPtrLow,
			       CurDescPostCopy->word2.value,
			       CurDescPostCopy->word3.value);

			if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
			    NUM_DESC_PER_RING_TX) {
				if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap) {
					etdev->TxRing.txDmaReadyToSend.value = 0;
				} else {
					etdev->TxRing.txDmaReadyToSend.value = 0x400;
				}
			}
		}
	}

	if (etdev->uiDuplexMode == 0 &&
	    pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
		/* NOTE - Same 32/64-bit issue as above... */
		CurDesc.DataBufferPtrHigh = 0x0;
		CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
		CurDesc.word2.value = 0;

		if (etdev->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
			if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
			    etdev->RegistryTxNumBuffers) {
				CurDesc.word3.value = 0x5;
				etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
			} else {
				CurDesc.word3.value = 0x1;
			}
		} else {
			CurDesc.word3.value = 0x5;
		}

		CurDesc.word2.bits.length_in_bytes =
		    NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;

		pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;

		memcpy(etdev->TxRing.pTxDescRingVa +
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
		       &CurDesc, sizeof(TX_DESC_ENTRY_t));

		CurDescPostCopy =
		    etdev->TxRing.pTxDescRingVa +
		    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

		DBG_TX(et131x_dbginfo,
		       "CURRENT DESCRIPTOR\n"
		       "\tAddress           : 0x%p\n"
		       "\tDataBufferPtrHigh : 0x%08x\n"
		       "\tDataBufferPtrLow  : 0x%08x\n"
		       "\tword2             : 0x%08x\n"
		       "\tword3             : 0x%08x\n",
		       CurDescPostCopy,
		       CurDescPostCopy->DataBufferPtrHigh,
		       CurDescPostCopy->DataBufferPtrLow,
		       CurDescPostCopy->word2.value,
		       CurDescPostCopy->word3.value);

		if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
		    NUM_DESC_PER_RING_TX) {
			if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap)
				etdev->TxRing.txDmaReadyToSend.value = 0;
			else
				etdev->TxRing.txDmaReadyToSend.value = 0x400;
		}

		DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
		       /* etdev->TxRing.txDmaReadyToSend.value, */
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
		       NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
	}

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->TxRing.CurrSendTail)
		etdev->TxRing.CurrSendTail->Next = pMpTcb;
	else
		etdev->TxRing.CurrSendHead = pMpTcb;

	etdev->TxRing.CurrSendTail = pMpTcb;

	DBG_ASSERT(pMpTcb->Next == NULL);

	etdev->TxRing.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->TxRing.txDmaReadyToSend.value,
	       &etdev->CSRAddress->txdma.service_request.value);

#ifdef CONFIG_ET131X_DEBUG
	DumpDeviceBlock(DBG_TX_ON, etdev, 1);
#endif

	/* For Gig only, we use Tx Interrupt coalescing.  Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->uiLinkSpeed == TRUEPHY_SPEED_1000MBPS) {
		writel(etdev->RegistryTxTimeInterval * NANO_IN_A_MICRO,
		       &etdev->CSRAddress->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

#endif
1226
/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                    PMP_TCB pMpTcb)
{
        unsigned long flags;
        TX_DESC_ENTRY_t *desc = NULL;
        struct net_device_stats *stats = &etdev->net_stats;

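        /* Bump the right transmit counter for the frame's destination type
         * (broadcast, multicast or unicast).
         */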
        if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD))
                atomic_inc(&etdev->Stats.brdcstxmt);
        else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI))
                atomic_inc(&etdev->Stats.multixmt);
        else
                atomic_inc(&etdev->Stats.unixmt);

        if (pMpTcb->Packet) {
                stats->tx_bytes += pMpTcb->Packet->len;

                /* Iterate through the TX descriptors on the ring
                 * corresponding to this packet and unmap the fragments
                 * they point to
                 */
                DBG_TX(et131x_dbginfo,
                       "Unmap descriptors Here\n"
                       "TCB                       : 0x%p\n"
                       "TCB Next                  : 0x%p\n"
                       "TCB PacketLength          : %d\n"
                       "TCB WrIndexStart.value    : 0x%08x\n"
                       "TCB WrIndexStart.bits.val : %d\n"
                       "TCB WrIndex.value         : 0x%08x\n"
                       "TCB WrIndex.bits.val      : %d\n",
                       pMpTcb,
                       pMpTcb->Next,
                       pMpTcb->PacketLength,
                       pMpTcb->WrIndexStart.value,
                       pMpTcb->WrIndexStart.bits.val,
                       pMpTcb->WrIndex.value,
                       pMpTcb->WrIndex.bits.val);

                do {
                        desc = (TX_DESC_ENTRY_t *)(etdev->TxRing.pTxDescRingVa +
                                                   pMpTcb->WrIndexStart.bits.val);

                        DBG_TX(et131x_dbginfo,
                               "CURRENT DESCRIPTOR\n"
                               "\tAddress           : 0x%p\n"
                               "\tDataBufferPtrHigh : 0x%08x\n"
                               "\tDataBufferPtrLow  : 0x%08x\n"
                               "\tword2             : 0x%08x\n"
                               "\tword3             : 0x%08x\n",
                               desc,
                               desc->DataBufferPtrHigh,
                               desc->DataBufferPtrLow,
                               desc->word2.value,
                               desc->word3.value);

                        pci_unmap_single(etdev->pdev,
                                         desc->DataBufferPtrLow,
                                         desc->word2.value, PCI_DMA_TODEVICE);

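                        /* Advance to the next descriptor; as with the
                         * ready-to-send pointer, the 10-bit index wraps at
                         * the end of the ring and bit 10 (0x400) flips to
                         * mark the new wrap generation.
                         */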
                        if (++pMpTcb->WrIndexStart.bits.val >=
                            NUM_DESC_PER_RING_TX) {
                                if (pMpTcb->WrIndexStart.bits.wrap)
                                        pMpTcb->WrIndexStart.value = 0;
                                else
                                        pMpTcb->WrIndexStart.value = 0x400;
                        }
                } while (desc != (etdev->TxRing.pTxDescRingVa +
                                  pMpTcb->WrIndex.bits.val));

                DBG_TX(et131x_dbginfo,
                       "Free Packet (SKB)   : 0x%p\n", pMpTcb->Packet);

                dev_kfree_skb_any(pMpTcb->Packet);
        }

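        /* Wipe the TCB so no stale state survives into its next use. */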
        memset(pMpTcb, 0, sizeof(MP_TCB));

        /* Add the TCB to the Ready Q */
        spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

        etdev->Stats.opackets++;

        if (etdev->TxRing.TCBReadyQueueTail) {
                etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
        } else {
                /* Apparently ready Q is empty. */
                etdev->TxRing.TCBReadyQueueHead = pMpTcb;
        }

        etdev->TxRing.TCBReadyQueueTail = pMpTcb;

        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

        DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
        PMP_TCB pMpTcb;
        struct list_head *pEntry;
        unsigned long flags;
        uint32_t FreeCounter = 0;

        DBG_ENTER(et131x_dbginfo);

        /* Discard anything still sitting on the send wait queue.  The head
         * entry must be unlinked under the lock; otherwise list_empty()
         * never becomes true and this loop spins forever.
         */
        while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
                spin_lock_irqsave(&etdev->SendWaitLock, flags);

                pEntry = etdev->TxRing.SendWaitQueue.next;
                list_del(pEntry);

                etdev->TxRing.nWaitSend--;
                spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
        }

        etdev->TxRing.nWaitSend = 0;

        /* Any packets being sent? Check the first TCB on the send list */
        spin_lock_irqsave(&etdev->TCBSendQLock, flags);

        pMpTcb = etdev->TxRing.CurrSendHead;

        while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
                PMP_TCB pNext = pMpTcb->Next;

                etdev->TxRing.CurrSendHead = pNext;

                if (pNext == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                etdev->TxRing.nBusySend--;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

                DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);

                FreeCounter++;
                et131x_free_send_packet(etdev, pMpTcb);

                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                pMpTcb = etdev->TxRing.CurrSendHead;
        }

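        /* No more than NUM_TCB packets can ever be in flight, so freeing
         * that many without exhausting the list means the TCB chain is
         * corrupt.
         */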
        if (FreeCounter == NUM_TCB) {
                DBG_ERROR(et131x_dbginfo,
                          "MpFreeBusySendPackets exited loop for a bad reason\n");
                BUG();
        }

        spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

        etdev->TxRing.nBusySend = 0;

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
        DBG_TX_ENTER(et131x_dbginfo);

        /* Mark as completed any packets which have been sent by the device. */
        et131x_update_tcb_list(etdev);

        /* If we queued any transmits because we didn't have any TCBs earlier,
         * dequeue and send those packets now, as long as we have free TCBs.
         */
        et131x_check_send_wait_list(etdev);

        DBG_TX_LEAVE(et131x_dbginfo);
}

/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Re-claims the send resources and completes sends.  Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
        unsigned long flags;
        DMA10W_t ServiceComplete;
        PMP_TCB pMpTcb;

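        /* NewServiceComplete reports the hardware's progress through the
         * ring: the 10-bit index (plus wrap bit) of the first descriptor the
         * device has not yet finished processing.
         */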
        ServiceComplete.value =
            readl(&etdev->CSRAddress->txdma.NewServiceComplete.value);

        /* Has the ring wrapped?  Process any descriptors that do not have
         * the same "wrap" indicator as the current completion indicator
         */
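        /* For example (values hypothetical): if ServiceComplete is
         * {wrap = 1, val = 3}, a TCB whose last descriptor sits at
         * {wrap = 0, val = 509} belongs to the previous trip around the ring
         * (wrap bits differ and 3 < 509), so the first loop reclaims it,
         * while a TCB at {wrap = 1, val = 2} is reclaimed by the second.
         */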
        spin_lock_irqsave(&etdev->TCBSendQLock, flags);

        pMpTcb = etdev->TxRing.CurrSendHead;
        while (pMpTcb &&
               ServiceComplete.bits.wrap != pMpTcb->WrIndex.bits.wrap &&
               ServiceComplete.bits.val < pMpTcb->WrIndex.bits.val) {
                etdev->TxRing.nBusySend--;
                etdev->TxRing.CurrSendHead = pMpTcb->Next;
                if (pMpTcb->Next == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
                et131x_free_send_packet(etdev, pMpTcb);
                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                /* Goto the next packet */
                pMpTcb = etdev->TxRing.CurrSendHead;
        }
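        /* Now reclaim descriptors from the same wrap generation as the
         * completion indicator: anything strictly below the completion index
         * has already been handled by the device.
         */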
        while (pMpTcb &&
               ServiceComplete.bits.wrap == pMpTcb->WrIndex.bits.wrap &&
               ServiceComplete.bits.val > pMpTcb->WrIndex.bits.val) {
                etdev->TxRing.nBusySend--;
                etdev->TxRing.CurrSendHead = pMpTcb->Next;
                if (pMpTcb->Next == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
                et131x_free_send_packet(etdev, pMpTcb);
                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                /* Goto the next packet */
                pMpTcb = etdev->TxRing.CurrSendHead;
        }

        /* Wake up the queue when we hit a low-water mark */
        if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
                netif_wake_queue(etdev->netdev);

        spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}

/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * room available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
        unsigned long flags;

        spin_lock_irqsave(&etdev->SendWaitLock, flags);

        while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
               MP_TCB_RESOURCES_AVAILABLE(etdev)) {
                struct list_head *pEntry;

                DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");

                /* Unlink the head entry; without the list_del() the queue
                 * never empties and this loop cannot terminate.
                 */
                pEntry = etdev->TxRing.SendWaitQueue.next;
                list_del(pEntry);

                etdev->TxRing.nWaitSend--;

                DBG_WARNING(et131x_dbginfo,
                            "MpHandleSendInterrupt - dequeued a waiting pkt. Waiting %d\n",
                            etdev->TxRing.nWaitSend);
        }

        spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}