/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"


/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                           PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it
 * transmits a packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        TX_RING_t *tx_ring = &adapter->TxRing;

        DBG_ENTER(et131x_dbginfo);

        /* Allocate memory for the TCBs (Transmit Control Blocks) */
        adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
                                                      GFP_ATOMIC | GFP_DMA);
        if (!adapter->TxRing.MpTcbMem) {
                DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Allocate enough memory for the Tx descriptor ring, and allocate
         * some extra so that the ring can be aligned on a 4k boundary.
         */
        desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
        tx_ring->pTxDescRingVa =
            (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
                                                    &tx_ring->pTxDescRingPa);
        if (!adapter->TxRing.pTxDescRingVa) {
                DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

        /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
        et131x_align_allocated_memory(adapter,
                                      &tx_ring->pTxDescRingAdjustedPa,
                                      &tx_ring->TxDescOffset, 0x0FFF);

        tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;

        /* Allocate memory for the Tx status block; check the virtual
         * address, since the physical address is undefined on failure.
         */
        tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
                                                    sizeof(TX_STATUS_BLOCK_t),
                                                    &tx_ring->pTxStatusPa);
        if (!adapter->TxRing.pTxStatusVa) {
                DBG_ERROR(et131x_dbginfo,
                          "Cannot alloc memory for Tx status block\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Allocate memory for a dummy buffer */
        tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
                                                      NIC_MIN_PACKET_SIZE,
                                                      &tx_ring->pTxDummyBlkPa);
        if (!adapter->TxRing.pTxDummyBlkVa) {
                DBG_ERROR(et131x_dbginfo,
                          "Cannot alloc memory for Tx dummy buffer\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        DBG_LEAVE(et131x_dbginfo);
        return 0;
}
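
/*
 * For reference, the 4k alignment performed above reduces to standard
 * round-up arithmetic. A minimal sketch (hypothetical helper; the real
 * et131x_align_allocated_memory() may differ in detail):
 */
#if 0
static void et131x_align_4k_sketch(u64 *phys, u32 *offset)
{
        u64 aligned = (*phys + 0x0FFF) & ~0x0FFFULL;    /* next 4k boundary */

        *offset = (u32)(aligned - *phys); /* bytes skipped at region start */
        *phys = aligned;        /* VA is advanced by the same offset */
}
#endif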

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
        int desc_size = 0;

        DBG_ENTER(et131x_dbginfo);

        if (adapter->TxRing.pTxDescRingVa) {
                /* Free memory relating to Tx rings here */
                adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

                desc_size =
                    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

                pci_free_consistent(adapter->pdev,
                                    desc_size,
                                    adapter->TxRing.pTxDescRingVa,
                                    adapter->TxRing.pTxDescRingPa);

                adapter->TxRing.pTxDescRingVa = NULL;
        }

        /* Free memory for the Tx status block */
        if (adapter->TxRing.pTxStatusVa) {
                pci_free_consistent(adapter->pdev,
                                    sizeof(TX_STATUS_BLOCK_t),
                                    adapter->TxRing.pTxStatusVa,
                                    adapter->TxRing.pTxStatusPa);

                adapter->TxRing.pTxStatusVa = NULL;
        }

        /* Free memory for the dummy buffer */
        if (adapter->TxRing.pTxDummyBlkVa) {
                pci_free_consistent(adapter->pdev,
                                    NIC_MIN_PACKET_SIZE,
                                    adapter->TxRing.pTxDummyBlkVa,
                                    adapter->TxRing.pTxDummyBlkPa);

                adapter->TxRing.pTxDummyBlkVa = NULL;
        }

        /* Free the memory for MP_TCB structures */
        kfree(adapter->TxRing.MpTcbMem);

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
        struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

        DBG_ENTER(et131x_dbginfo);

        /* Load the hardware with the start of the transmit descriptor ring. */
        writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
               &txdma->pr_base_hi);
        writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
               &txdma->pr_base_lo);

        /* Initialise the transmit DMA engine */
        writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

        /* Load the completion writeback physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        writel(0, &txdma->dma_wb_base_hi);
        writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);

        memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

        writel(0, &txdma->service_request.value);
        etdev->TxRing.txDmaReadyToSend.value = 0;

        DBG_LEAVE(et131x_dbginfo);
}
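
/*
 * The txDmaReadyToSend image initialised above packs a descriptor index and
 * a wrap flag into a single register value. Judging from the DMA10W_t type
 * used for the same registers in the reference code below and the 0x400
 * constants written when the index rolls over, the layout appears to be a
 * 10-bit index in bits 9:0 with a wrap bit at bit 10 that toggles on every
 * pass around the ring. A sketch of advancing such an index (hypothetical
 * helper, assuming that layout and that a single advance never runs past
 * the end of the ring, as in nic_send_packet() below):
 */
#if 0
#define TX_IDX_MASK     0x03FF  /* bits 9:0: descriptor index */
#define TX_WRAP_BIT     0x0400  /* bit 10: flips on each ring wrap */

static uint32_t tx_index_add(uint32_t reg, uint32_t count)
{
        uint32_t idx = (reg & TX_IDX_MASK) + count;

        if (idx < NUM_DESC_PER_RING_TX)
                return (reg & TX_WRAP_BIT) | idx;       /* no wrap this time */

        /* hit the end of the ring: index restarts at 0, wrap bit toggles */
        return (reg & TX_WRAP_BIT) ^ TX_WRAP_BIT;
}
#endif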

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
        DBG_ENTER(et131x_dbginfo);

        /* Setup the transmit dma configuration register */
        writel(0x101, &etdev->regs->txdma.csr.value);

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
        DBG_ENTER(et131x_dbginfo);

        if (etdev->RegistryPhyLoopbk) {
                /* TxDMA is disabled for loopback operation. */
                writel(0x101, &etdev->regs->txdma.csr.value);
        } else {
                TXDMA_CSR_t csr = { 0 };

                /* Setup the transmit dma configuration register for normal
                 * operation
                 */
                csr.bits.sngl_epkt_mode = 1;
                csr.bits.halt = 0;
                csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
                writel(csr.value, &etdev->regs->txdma.csr.value);
        }

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
        PMP_TCB pMpTcb;
        uint32_t TcbCount;
        TX_RING_t *tx_ring;

        DBG_ENTER(et131x_dbginfo);

        /* Setup some convenience pointers */
        tx_ring = &adapter->TxRing;
        pMpTcb = adapter->TxRing.MpTcbMem;

        tx_ring->TCBReadyQueueHead = pMpTcb;

        /* Go through and set up each TCB */
        for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
                memset(pMpTcb, 0, sizeof(MP_TCB));

                /* Set the link pointer in HW TCB to the next TCB in the
                 * chain.  If this is the last TCB in the chain, also set the
                 * tail pointer.
                 */
                if (TcbCount < NUM_TCB - 1) {
                        pMpTcb->Next = pMpTcb + 1;
                } else {
                        tx_ring->TCBReadyQueueTail = pMpTcb;
                        pMpTcb->Next = (PMP_TCB) NULL;
                }

                pMpTcb++;
        }

        /* Curr send queue should now be empty */
        tx_ring->CurrSendHead = (PMP_TCB) NULL;
        tx_ring->CurrSendTail = (PMP_TCB) NULL;

        INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);

        DBG_LEAVE(et131x_dbginfo);
}
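
/*
 * After initialisation the TCB pool forms a singly-linked free list:
 *
 *      TCBReadyQueueHead -> TCB[0] -> TCB[1] -> ... -> TCB[NUM_TCB-1] -> NULL
 *                                                      ^ TCBReadyQueueTail
 *
 * et131x_send_packet() below pops a TCB from the head of this list under
 * TCBReadyQLock and pushes it back at the tail if the send fails; the
 * current-send queue (CurrSendHead/CurrSendTail) starts out empty.
 */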

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Returns 0 in almost all cases; a non-zero value only on extreme hard
 * failure.
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
        int status = 0;
        struct et131x_adapter *etdev = NULL;

        DBG_TX_ENTER(et131x_dbginfo);

        etdev = netdev_priv(netdev);

        /* Send these packets
         *
         * NOTE: The Linux Tx entry point is only given one packet at a time
         * to Tx, so the PacketCount and its associated array make no sense
         * here
         */

        /* Queue is not empty or TCB is not available */
        if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
            MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
                /* NOTE: If there's an error on send, no need to queue the
                 * packet under Linux; if we just send an error up to the
                 * netif layer, it will resend the skb to us.
                 */
                DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
                status = -ENOMEM;
        } else {
                /* We need to see if the link is up; if it's not, make the
                 * netif layer think we're good and drop the packet
                 */
                /*
                 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
                 *  etdev->DriverNoPhyAccess )
                 */
                if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
                    || !netif_carrier_ok(netdev)) {
                        DBG_VERBOSE(et131x_dbginfo,
                                "Can't Tx, Link is DOWN; drop the packet\n");

                        dev_kfree_skb_any(skb);
                        skb = NULL;

                        etdev->net_stats.tx_dropped++;
                } else {
                        status = et131x_send_packet(skb, etdev);

                        if (status == -ENOMEM) {

                                /* NOTE: If there's an error on send, no need
                                 * to queue the packet under Linux; if we just
                                 * send an error up to the netif layer, it
                                 * will resend the skb to us.
                                 */
                                DBG_WARNING(et131x_dbginfo,
                                            "Resources problem, Queue tx packet\n");
                        } else if (status != 0) {
                                /* On any other error, make netif think we're
                                 * OK and drop the packet
                                 */
                                DBG_WARNING(et131x_dbginfo,
                                            "General error, drop packet\n");

                                dev_kfree_skb_any(skb);
                                skb = NULL;

                                etdev->net_stats.tx_dropped++;
                        }
                }
        }

        DBG_TX_LEAVE(et131x_dbginfo);
        return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Returns 0 in almost all cases; a non-zero value only on extreme hard
 * failure.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev)
{
        int status = 0;
        PMP_TCB pMpTcb = NULL;
        uint16_t *shbufva;
        unsigned long flags;

        DBG_TX_ENTER(et131x_dbginfo);

        /* Is our buffer scattered, or contiguous? */
        if (skb_shinfo(skb)->nr_frags == 0) {
                DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
        } else {
                DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
                       skb_shinfo(skb)->nr_frags);
        }

        /* All packets must have at least a MAC address and a protocol type */
        if (skb->len < ETH_HLEN) {
                DBG_ERROR(et131x_dbginfo,
                          "Packet size < ETH_HLEN (14 bytes)\n");
                DBG_LEAVE(et131x_dbginfo);
                return -EIO;
        }

        /* Get a TCB for this packet */
        spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

        pMpTcb = etdev->TxRing.TCBReadyQueueHead;

        if (pMpTcb == NULL) {
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

                DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
                DBG_TX_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

        if (etdev->TxRing.TCBReadyQueueHead == NULL)
                etdev->TxRing.TCBReadyQueueTail = NULL;

        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

        pMpTcb->PacketLength = skb->len;
        pMpTcb->Packet = skb;

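        /* Peek at the destination address in the linear data to classify
         * the frame for the stats flags set below: all-ones is broadcast,
         * and a first octet with its group bit set (tested via the low bits
         * of the first 16-bit word, which assumes little-endian byte order)
         * is multicast.
         */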
        if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
                shbufva = (uint16_t *) skb->data;

                if ((shbufva[0] == 0xffff) &&
                    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
                        MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
                } else if ((shbufva[0] & 0x3) == 0x0001) {
                        MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
                }
        }

        pMpTcb->Next = NULL;

        /* Call the NIC specific send handler. */
        if (status == 0)
                status = nic_send_packet(etdev, pMpTcb);

        if (status != 0) {
                spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

                if (etdev->TxRing.TCBReadyQueueTail) {
                        etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
                } else {
                        /* Apparently ready Q is empty. */
                        etdev->TxRing.TCBReadyQueueHead = pMpTcb;
                }

                etdev->TxRing.TCBReadyQueueTail = pMpTcb;

                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

                DBG_TX_LEAVE(et131x_dbginfo);
                return status;
        }

        DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
        uint32_t loopIndex;
        TX_DESC_ENTRY_t CurDesc[24];
        uint32_t FragmentNumber = 0;
        uint32_t thiscopy, remainder;
        struct sk_buff *pPacket = pMpTcb->Packet;
        uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
        unsigned long flags;

        DBG_TX_ENTER(et131x_dbginfo);

        /* Part of the optimizations of this send routine restrict us to
         * sending 24 fragments per pass.  In practice we should never see
         * more than 5 fragments.
         *
         * NOTE: The older version of this function (below) can handle any
         * number of fragments. If needed, that version can be used instead,
         * although it is less efficient.
         */
        if (FragListCount > 23) {
                DBG_TX_LEAVE(et131x_dbginfo);
                return -EIO;
        }

        memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));

        for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
                /* If there is something in this element, let's get a
                 * descriptor from the ring and get the necessary data
                 */
                if (loopIndex == 0) {
                        /* If the fragments are smaller than a standard MTU,
                         * then map them to a single descriptor in the Tx
                         * Desc ring. However, if they're larger, as is
                         * possible with support for jumbo packets, then
                         * split them each across 2 descriptors.
                         *
                         * This will work until we determine why the hardware
                         * doesn't seem to like large fragments.
                         */
                        if ((pPacket->len - pPacket->data_len) <= 1514) {
                                DBG_TX(et131x_dbginfo,
                                       "Got packet of length %d, "
                                       "filling desc entry %d, "
                                       "TCB: 0x%p\n",
                                       (pPacket->len - pPacket->data_len),
                                       etdev->TxRing.txDmaReadyToSend.bits.
                                       val, pMpTcb);

                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    pPacket->len - pPacket->data_len;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   pPacket->len -
                                                   pPacket->data_len,
                                                   PCI_DMA_TODEVICE);
                        } else {
                                DBG_TX(et131x_dbginfo,
                                       "Got packet of length %d, "
                                       "filling desc entry %d, "
                                       "TCB: 0x%p\n",
                                       (pPacket->len - pPacket->data_len),
                                       etdev->TxRing.txDmaReadyToSend.bits.
                                       val, pMpTcb);

                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                /* The second half takes the remainder, so
                                 * an odd-length buffer does not lose its
                                 * last byte to the / 2 rounding above.
                                 */
                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    (pPacket->len - pPacket->data_len) -
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data +
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   (pPacket->len -
                                                    pPacket->data_len) -
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                        }
                } else {
                        DBG_TX(et131x_dbginfo,
                               "Got packet of length %d, "
                               "filling desc entry %d\n"
                               "TCB: 0x%p\n",
                               pFragList[loopIndex - 1].size,
                               etdev->TxRing.txDmaReadyToSend.bits.val,
                               pMpTcb);

                        CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                        CurDesc[FragmentNumber].word2.bits.length_in_bytes =
                            pFragList[loopIndex - 1].size;

                        /* NOTE: Here, the dma_addr_t returned from
                         * pci_map_page() is implicitly cast as a uint32_t.
                         * Although dma_addr_t can be 64-bit, the address
                         * returned by pci_map_page() is always 32-bit
                         * addressable (as defined by the pci/dma subsystem)
                         */
                        CurDesc[FragmentNumber++].DataBufferPtrLow =
                            pci_map_page(etdev->pdev,
                                         pFragList[loopIndex - 1].page,
                                         pFragList[loopIndex - 1].page_offset,
                                         pFragList[loopIndex - 1].size,
                                         PCI_DMA_TODEVICE);
                }
        }

        if (FragmentNumber == 0) {
                DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
                return -EIO;
        }

        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
                    PARM_TX_NUM_BUFS_DEF) {
                        CurDesc[FragmentNumber - 1].word3.value = 0x5;
                        etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
                } else {
                        CurDesc[FragmentNumber - 1].word3.value = 0x1;
                }
        } else {
                CurDesc[FragmentNumber - 1].word3.value = 0x5;
        }
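        /* word3 of the last descriptor appears to carry the end-of-packet
         * flag in bit 0, with bit 2 additionally requesting an interrupt on
         * completion (hence 0x5 vs 0x1): at gigabit speed an interrupt is
         * requested only every PARM_TX_NUM_BUFS_DEF packets, and the
         * watchdog timer armed below covers a trailing packet.
         */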

        CurDesc[0].word3.bits.f = 1;

        pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
        pMpTcb->PacketStaleCount = 0;

        spin_lock_irqsave(&etdev->SendHWLock, flags);

        thiscopy =
            NUM_DESC_PER_RING_TX - etdev->TxRing.txDmaReadyToSend.bits.val;

        if (thiscopy >= FragmentNumber) {
                remainder = 0;
                thiscopy = FragmentNumber;
        } else {
                remainder = FragmentNumber - thiscopy;
        }

        memcpy(etdev->TxRing.pTxDescRingVa +
               etdev->TxRing.txDmaReadyToSend.bits.val, CurDesc,
               sizeof(TX_DESC_ENTRY_t) * thiscopy);

        etdev->TxRing.txDmaReadyToSend.bits.val += thiscopy;

        if ((etdev->TxRing.txDmaReadyToSend.bits.val == 0) ||
            (etdev->TxRing.txDmaReadyToSend.bits.val ==
             NUM_DESC_PER_RING_TX)) {
                if (etdev->TxRing.txDmaReadyToSend.bits.wrap)
                        etdev->TxRing.txDmaReadyToSend.value = 0;
                else
                        etdev->TxRing.txDmaReadyToSend.value = 0x400;
        }

        if (remainder) {
                memcpy(etdev->TxRing.pTxDescRingVa,
                       CurDesc + thiscopy,
                       sizeof(TX_DESC_ENTRY_t) * remainder);

                etdev->TxRing.txDmaReadyToSend.bits.val += remainder;
        }

        if (etdev->TxRing.txDmaReadyToSend.bits.val == 0) {
                if (etdev->TxRing.txDmaReadyToSend.value)
                        pMpTcb->WrIndex.value = NUM_DESC_PER_RING_TX - 1;
                else
                        pMpTcb->WrIndex.value =
                            0x400 | (NUM_DESC_PER_RING_TX - 1);
        } else
                pMpTcb->WrIndex.value =
                    etdev->TxRing.txDmaReadyToSend.value - 1;

        spin_lock(&etdev->TCBSendQLock);

        if (etdev->TxRing.CurrSendTail)
                etdev->TxRing.CurrSendTail->Next = pMpTcb;
        else
                etdev->TxRing.CurrSendHead = pMpTcb;

        etdev->TxRing.CurrSendTail = pMpTcb;

        DBG_ASSERT(pMpTcb->Next == NULL);

        etdev->TxRing.nBusySend++;

        spin_unlock(&etdev->TCBSendQLock);

        /* Write the new write pointer back to the device. */
        writel(etdev->TxRing.txDmaReadyToSend.value,
               &etdev->regs->txdma.service_request.value);

        /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }

        spin_unlock_irqrestore(&etdev->SendHWLock, flags);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

/*
 * NOTE: For now, keep this older version of NICSendPacket around for
 * reference, even though it's not used
 */
#if 0

/**
 * NICSendPacket - NIC specific send handler.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 on success, errno on failure.
 *
 * This version of the send routine is designed for version A silicon.
 * Assumption - Send spinlock has been acquired.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
        uint32_t loopIndex, fragIndex, loopEnd;
        uint32_t splitfirstelem = 0;
        uint32_t SegmentSize = 0;
        TX_DESC_ENTRY_t CurDesc;
        TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
        uint32_t SlotsAvailable;
        DMA10W_t ServiceComplete;
        unsigned long flags;
        struct sk_buff *pPacket = pMpTcb->Packet;
        uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];

        DBG_TX_ENTER(et131x_dbginfo);

        ServiceComplete.value =
                readl(&etdev->regs->txdma.NewServiceComplete.value);

        /*
         * Attempt to fix TWO hardware bugs:
         * 1)  NEVER write an odd number of descriptors.
         * 2)  If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
         *     packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
         *     descriptor IN HALF DUPLEX MODE ONLY
         * NOTE that (2) interacts with (1).  If the packet is less than
         * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
         * Therefore if it is even now, it will eventually end up odd, and
         * so will need adjusting.
         *
         * VLAN tags get involved since VLAN tags add another one or two
         * segments.
         */
        DBG_TX(et131x_dbginfo,
               "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);

        if ((etdev->duplex_mode == 0)
            && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
                DBG_TX(et131x_dbginfo,
                       "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
                if ((FragListCount & 0x1) == 0) {
                        DBG_TX(et131x_dbginfo,
                               "Even number of descs, split 1st elem\n");
                        splitfirstelem = 1;
                        /* SegmentSize = pFragList[0].size / 2; */
                        SegmentSize = (pPacket->len - pPacket->data_len) / 2;
                }
        } else if (FragListCount & 0x1) {
                DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");

                splitfirstelem = 1;
                /* SegmentSize = pFragList[0].size / 2; */
                SegmentSize = (pPacket->len - pPacket->data_len) / 2;
        }

        spin_lock_irqsave(&etdev->SendHWLock, flags);

        if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
            ServiceComplete.bits.serv_cpl_wrap) {
                /* The ring hasn't wrapped.  Slots available should be
                 * (RING_SIZE) - the difference between the two pointers.
                 */
                SlotsAvailable = NUM_DESC_PER_RING_TX -
                    (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
                     ServiceComplete.bits.serv_cpl);
        } else {
                /* The ring has wrapped.  Slots available should be the
                 * difference between the two pointers.
                 */
                SlotsAvailable = ServiceComplete.bits.serv_cpl -
                    etdev->TxRing.txDmaReadyToSend.bits.serv_req;
        }

        if ((FragListCount + splitfirstelem) > SlotsAvailable) {
                DBG_WARNING(et131x_dbginfo,
                            "Not Enough Space in Tx Desc Ring\n");
                spin_unlock_irqrestore(&etdev->SendHWLock, flags);
                return -ENOMEM;
        }

        loopEnd = (FragListCount) + splitfirstelem;
        fragIndex = 0;

        DBG_TX(et131x_dbginfo,
               "TCB           : 0x%p\n"
               "Packet (SKB)  : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
               "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
               pMpTcb,
               pPacket, pPacket->len, pPacket->data_len,
               FragListCount, splitfirstelem, loopEnd);

        for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
                if (loopIndex > splitfirstelem)
                        fragIndex++;

                DBG_TX(et131x_dbginfo,
                       "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
                       fragIndex);

                /* If there is something in this element, let's get a
                 * descriptor from the ring and get the necessary data
                 */
                DBG_TX(et131x_dbginfo,
                       "Packet Length %d,"
                       "filling desc entry %d\n",
                       pPacket->len,
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req);

                /*
                 * NOTE - Should we do a paranoia check here to make sure the fragment
                 * actually has a length? It's HIGHLY unlikely the fragment would
                 * contain no data...
                 */
                if (1) {
                        /* NOTE - Currently always getting 32-bit addrs, and
                         * dma_addr_t is only 32-bit, so leave "high" ptr
                         * value out for now
                         * CurDesc.DataBufferPtrHigh = 0;
                         */

                        CurDesc.word2.value = 0;
                        CurDesc.word3.value = 0;

                        if (fragIndex == 0) {
                                if (splitfirstelem) {
                                        DBG_TX(et131x_dbginfo,
                                               "Split first element: YES\n");

                                        if (loopIndex == 0) {
                                                DBG_TX(et131x_dbginfo,
                                                       "Got fragment of length %d, fragIndex: %d\n",
                                                       pPacket->len -
                                                       pPacket->data_len,
                                                       fragIndex);
                                                DBG_TX(et131x_dbginfo,
                                                       "SegmentSize: %d\n",
                                                       SegmentSize);

                                                CurDesc.word2.bits.
                                                    length_in_bytes =
                                                    SegmentSize;
                                                CurDesc.DataBufferPtrLow =
                                                    pci_map_single(etdev->
                                                                   pdev,
                                                                   pPacket->
                                                                   data,
                                                                   SegmentSize,
                                                                   PCI_DMA_TODEVICE);
                                                DBG_TX(et131x_dbginfo,
                                                       "pci_map_single() returns: 0x%08x\n",
                                                       CurDesc.
                                                       DataBufferPtrLow);
                                        } else {
                                                DBG_TX(et131x_dbginfo,
                                                       "Got fragment of length %d, fragIndex: %d\n",
                                                       pPacket->len -
                                                       pPacket->data_len,
                                                       fragIndex);
                                                DBG_TX(et131x_dbginfo,
                                                       "Leftover Size: %d\n",
                                                       (pPacket->len -
                                                        pPacket->data_len -
                                                        SegmentSize));

                                                CurDesc.word2.bits.
                                                    length_in_bytes =
                                                    ((pPacket->len -
                                                      pPacket->data_len) -
                                                     SegmentSize);
                                                CurDesc.DataBufferPtrLow =
                                                    pci_map_single(etdev->
                                                                   pdev,
                                                                   (pPacket->
                                                                    data +
                                                                    SegmentSize),
                                                                   (pPacket->
                                                                    len -
                                                                    pPacket->
                                                                    data_len -
                                                                    SegmentSize),
                                                                   PCI_DMA_TODEVICE);
                                                DBG_TX(et131x_dbginfo,
                                                       "pci_map_single() returns: 0x%08x\n",
                                                       CurDesc.
                                                       DataBufferPtrLow);
                                        }
                                } else {
                                        DBG_TX(et131x_dbginfo,
                                               "Split first element: NO\n");

                                        CurDesc.word2.bits.length_in_bytes =
                                            pPacket->len - pPacket->data_len;

                                        CurDesc.DataBufferPtrLow =
                                            pci_map_single(etdev->pdev,
                                                           pPacket->data,
                                                           (pPacket->len -
                                                            pPacket->data_len),
                                                           PCI_DMA_TODEVICE);
                                        DBG_TX(et131x_dbginfo,
                                               "pci_map_single() returns: 0x%08x\n",
                                               CurDesc.DataBufferPtrLow);
                                }
                        } else {

                                CurDesc.word2.bits.length_in_bytes =
                                    pFragList[fragIndex - 1].size;
                                CurDesc.DataBufferPtrLow =
                                    pci_map_page(etdev->pdev,
                                                 pFragList[fragIndex - 1].page,
                                                 pFragList[fragIndex -
                                                           1].page_offset,
                                                 pFragList[fragIndex - 1].size,
                                                 PCI_DMA_TODEVICE);
                                DBG_TX(et131x_dbginfo,
                                       "pci_map_page() returns: 0x%08x\n",
                                       CurDesc.DataBufferPtrLow);
                        }

                        if (loopIndex == 0) {
                                /* This is the first descriptor of the packet
                                 *
                                 * Set the "f" bit to indicate this is the
                                 * first descriptor in the packet.
                                 */
                                DBG_TX(et131x_dbginfo,
                                       "This is our FIRST descriptor\n");
                                CurDesc.word3.bits.f = 1;

                                pMpTcb->WrIndexStart =
                                    etdev->TxRing.txDmaReadyToSend;
                        }

                        if ((loopIndex == (loopEnd - 1)) &&
                            (etdev->duplex_mode ||
                             (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
                                /* This is the Last descriptor of the packet */
                                DBG_TX(et131x_dbginfo,
                                       "THIS is our LAST descriptor\n");

                                if (etdev->linkspeed ==
                                    TRUEPHY_SPEED_1000MBPS) {
                                        if (++etdev->TxRing.
                                            TxPacketsSinceLastinterrupt >=
                                            PARM_TX_NUM_BUFS_DEF) {
                                                CurDesc.word3.value = 0x5;
                                                etdev->TxRing.
                                                    TxPacketsSinceLastinterrupt
                                                    = 0;
                                        } else {
                                                CurDesc.word3.value = 0x1;
                                        }
                                } else {
                                        CurDesc.word3.value = 0x5;
                                }

                                /* Following index will be used during freeing
                                 * of packet
                                 */
                                pMpTcb->WrIndex =
                                    etdev->TxRing.txDmaReadyToSend;
                                pMpTcb->PacketStaleCount = 0;
                        }

                        /* Copy the descriptor (filled above) into the
                         * descriptor ring at the next free entry.  Advance
                         * the "next free entry" variable
                         */
                        memcpy(etdev->TxRing.pTxDescRingVa +
                               etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                               &CurDesc, sizeof(TX_DESC_ENTRY_t));

                        CurDescPostCopy =
                            etdev->TxRing.pTxDescRingVa +
                            etdev->TxRing.txDmaReadyToSend.bits.serv_req;

                        DBG_TX(et131x_dbginfo,
                               "CURRENT DESCRIPTOR\n"
                               "\tAddress           : 0x%p\n"
                               "\tDataBufferPtrHigh : 0x%08x\n"
                               "\tDataBufferPtrLow  : 0x%08x\n"
                               "\tword2             : 0x%08x\n"
                               "\tword3             : 0x%08x\n",
                               CurDescPostCopy,
                               CurDescPostCopy->DataBufferPtrHigh,
                               CurDescPostCopy->DataBufferPtrLow,
                               CurDescPostCopy->word2.value,
                               CurDescPostCopy->word3.value);

                        if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
                            NUM_DESC_PER_RING_TX) {
                                if (etdev->TxRing.txDmaReadyToSend.bits.
                                    serv_req_wrap) {
                                        etdev->TxRing.txDmaReadyToSend.
                                            value = 0;
                                } else {
                                        etdev->TxRing.txDmaReadyToSend.
                                            value = 0x400;
                                }
                        }
                }
        }

        if (etdev->duplex_mode == 0 &&
            pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
                /* NOTE - Same 32/64-bit issue as above... */
                CurDesc.DataBufferPtrHigh = 0x0;
                CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
                CurDesc.word2.value = 0;

                if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                        if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
                            PARM_TX_NUM_BUFS_DEF) {
                                CurDesc.word3.value = 0x5;
                                etdev->TxRing.TxPacketsSinceLastinterrupt =
                                    0;
                        } else {
                                CurDesc.word3.value = 0x1;
                        }
                } else {
                        CurDesc.word3.value = 0x5;
                }

                CurDesc.word2.bits.length_in_bytes =
                    NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;

                pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;

                memcpy(etdev->TxRing.pTxDescRingVa +
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                       &CurDesc, sizeof(TX_DESC_ENTRY_t));

                CurDescPostCopy =
                    etdev->TxRing.pTxDescRingVa +
                    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

                DBG_TX(et131x_dbginfo,
                       "CURRENT DESCRIPTOR\n"
                       "\tAddress           : 0x%p\n"
                       "\tDataBufferPtrHigh : 0x%08x\n"
                       "\tDataBufferPtrLow  : 0x%08x\n"
                       "\tword2             : 0x%08x\n"
                       "\tword3             : 0x%08x\n",
                       CurDescPostCopy,
                       CurDescPostCopy->DataBufferPtrHigh,
                       CurDescPostCopy->DataBufferPtrLow,
                       CurDescPostCopy->word2.value,
                       CurDescPostCopy->word3.value);

                if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
                    NUM_DESC_PER_RING_TX) {
                        if (etdev->TxRing.txDmaReadyToSend.bits.
                            serv_req_wrap) {
                                etdev->TxRing.txDmaReadyToSend.value = 0;
                        } else {
                                etdev->TxRing.txDmaReadyToSend.value = 0x400;
                        }
                }

                DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
                       /* etdev->TxRing.txDmaReadyToSend.value, */
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                       NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
        }

        spin_lock(&etdev->TCBSendQLock);

        if (etdev->TxRing.CurrSendTail)
                etdev->TxRing.CurrSendTail->Next = pMpTcb;
        else
                etdev->TxRing.CurrSendHead = pMpTcb;

        etdev->TxRing.CurrSendTail = pMpTcb;

        DBG_ASSERT(pMpTcb->Next == NULL);

        etdev->TxRing.nBusySend++;

        spin_unlock(&etdev->TCBSendQLock);

        /* Write the new write pointer back to the device. */
        writel(etdev->TxRing.txDmaReadyToSend.value,
               &etdev->regs->txdma.service_request.value);

#ifdef CONFIG_ET131X_DEBUG
        DumpDeviceBlock(DBG_TX_ON, etdev, 1);
#endif

        /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }

        spin_unlock_irqrestore(&etdev->SendHWLock, flags);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

#endif

1228 /**
1229  * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
1230  * @etdev: pointer to our adapter
1231  * @pMpTcb: pointer to MP_TCB
1232  *
1233  * Assumption - Send spinlock has been acquired
1234  */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
				    PMP_TCB pMpTcb)
{
	unsigned long flags;
	TX_DESC_ENTRY_t *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD))
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI))
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (pMpTcb->Packet) {
		stats->tx_bytes += pMpTcb->Packet->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		DBG_TX(et131x_dbginfo,
		       "Unmap descriptors Here\n"
		       "TCB                       : 0x%p\n"
		       "TCB Next                  : 0x%p\n"
		       "TCB PacketLength          : %d\n"
		       "TCB WrIndexStart.value    : 0x%08x\n"
		       "TCB WrIndexStart.bits.val : %d\n"
		       "TCB WrIndex.value         : 0x%08x\n"
		       "TCB WrIndex.bits.val      : %d\n",
		       pMpTcb,
		       pMpTcb->Next,
		       pMpTcb->PacketLength,
		       pMpTcb->WrIndexStart.value,
		       pMpTcb->WrIndexStart.bits.val,
		       pMpTcb->WrIndex.value,
		       pMpTcb->WrIndex.bits.val);

		do {
			desc = (TX_DESC_ENTRY_t *)(etdev->TxRing.pTxDescRingVa +
						   pMpTcb->WrIndexStart.bits.val);

			DBG_TX(et131x_dbginfo,
			       "CURRENT DESCRIPTOR\n"
			       "\tAddress           : 0x%p\n"
			       "\tDataBufferPtrHigh : 0x%08x\n"
			       "\tDataBufferPtrLow  : 0x%08x\n"
			       "\tword2             : 0x%08x\n"
			       "\tword3             : 0x%08x\n",
			       desc,
			       desc->DataBufferPtrHigh,
			       desc->DataBufferPtrLow,
			       desc->word2.value,
			       desc->word3.value);

			pci_unmap_single(etdev->pdev,
					 desc->DataBufferPtrLow,
					 desc->word2.value, PCI_DMA_TODEVICE);

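			/* Advance WrIndexStart using the same 10-bit index
			 * plus wrap bit scheme used when the descriptors
			 * were posted: past the end of the ring, reset the
			 * index and toggle the wrap bit.
			 */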
			if (++pMpTcb->WrIndexStart.bits.val >=
			    NUM_DESC_PER_RING_TX) {
				if (pMpTcb->WrIndexStart.bits.wrap)
					pMpTcb->WrIndexStart.value = 0;
				else
					pMpTcb->WrIndexStart.value = 0x400;
			}
		} while (desc != (etdev->TxRing.pTxDescRingVa +
				  pMpTcb->WrIndex.bits.val));

		DBG_TX(et131x_dbginfo,
		       "Free Packet (SKB)   : 0x%p\n", pMpTcb->Packet);

		dev_kfree_skb_any(pMpTcb->Packet);
	}

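	/* Wipe the TCB clean so it goes back on the free list in a
	 * pristine state for the next transmit.
	 */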
	memset(pMpTcb, 0, sizeof(MP_TCB));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->TxRing.TCBReadyQueueTail) {
		etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
	} else {
		/* Apparently ready Q is empty. */
		etdev->TxRing.TCBReadyQueueHead = pMpTcb;
	}

	etdev->TxRing.TCBReadyQueueTail = pMpTcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	PMP_TCB pMpTcb;
	struct list_head *entry;
	unsigned long flags;
	uint32_t FreeCounter = 0;

	DBG_ENTER(et131x_dbginfo);

	/* Drain the send wait queue.  Each entry must actually be unlinked
	 * here; a non-empty queue would otherwise keep this loop spinning
	 * forever.
	 */
	while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
		spin_lock_irqsave(&etdev->SendWaitLock, flags);

		entry = etdev->TxRing.SendWaitQueue.next;
		list_del(entry);

		etdev->TxRing.nWaitSend--;
		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
	}

	etdev->TxRing.nWaitSend = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;

	while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
		PMP_TCB pNext = pMpTcb->Next;

		etdev->TxRing.CurrSendHead = pNext;

		if (pNext == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		etdev->TxRing.nBusySend--;

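		/* Drop the send-queue lock around et131x_free_send_packet();
		 * it acquires the TCBReadyQLock itself and unmaps DMA
		 * buffers, none of which needs this lock held.
		 */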
		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);

		FreeCounter++;
		et131x_free_send_packet(etdev, pMpTcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	if (FreeCounter == NUM_TCB) {
		DBG_ERROR(et131x_dbginfo,
		    "et131x_free_busy_send_packets freed every TCB, yet the send list is not empty\n");
		BUG();
	}

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->TxRing.nBusySend = 0;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for send processing
 * @etdev: pointer to our adapter
 *
 * Reclaim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	DBG_TX_ENTER(et131x_dbginfo);

	/* Mark as completed any packets which have been sent by the device. */
	et131x_update_tcb_list(etdev);

	/* If we queued any transmits because we didn't have any TCBs earlier,
	 * dequeue and send those packets now, as long as we have free TCBs.
	 */
	et131x_check_send_wait_list(etdev);

	DBG_TX_LEAVE(et131x_dbginfo);
}

/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Reclaims the send resources and completes sends.  Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
	unsigned long flags;
	DMA10W_t ServiceComplete;
	PMP_TCB pMpTcb;

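	/* Snapshot the device's current completion position; everything
	 * between CurrSendHead's write index and this value has been
	 * handled by the hardware.
	 */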
	ServiceComplete.value =
	    readl(&etdev->regs->txdma.NewServiceComplete.value);

	/* Has the ring wrapped?  Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;
	while (pMpTcb &&
	       ServiceComplete.bits.wrap != pMpTcb->WrIndex.bits.wrap &&
	       ServiceComplete.bits.val < pMpTcb->WrIndex.bits.val) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}
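	/* Same walk for TCBs on the same wrap as the completion indicator:
	 * anything with a strictly lower index has already been sent by
	 * the hardware.
	 */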
	while (pMpTcb &&
	       ServiceComplete.bits.wrap == pMpTcb->WrIndex.bits.wrap &&
	       ServiceComplete.bits.val > pMpTcb->WrIndex.bits.val) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Go to the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
		netif_wake_queue(etdev->netdev);

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}

/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * room available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
	unsigned long flags;

	spin_lock_irqsave(&etdev->SendWaitLock, flags);

	while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
		struct list_head *entry;

		DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");

		/* Unlink the entry; if it stayed on the queue this loop
		 * would never terminate.  Note that as written the packet
		 * is only dequeued here, not re-posted to the device.
		 */
		entry = etdev->TxRing.SendWaitQueue.next;
		list_del(entry);

		etdev->TxRing.nWaitSend--;

		DBG_WARNING(et131x_dbginfo,
		    "et131x_check_send_wait_list - dequeued a waiting pkt. Waiting %d\n",
				etdev->TxRing.nWaitSend);
	}

	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}