/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"


/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                           PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        TX_RING_t *tx_ring = &adapter->TxRing;

        DBG_ENTER(et131x_dbginfo);

        /* Allocate memory for the TCBs (Transmit Control Blocks) */
        adapter->TxRing.MpTcbMem = kcalloc(NUM_TCB, sizeof(MP_TCB),
                                           GFP_ATOMIC | GFP_DMA);
        if (!adapter->TxRing.MpTcbMem) {
                DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Allocate enough memory for the Tx descriptor ring, and allocate
         * some extra so that the ring can be aligned on a 4k boundary.
         */
        desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
        tx_ring->pTxDescRingVa =
            (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
                                                    &tx_ring->pTxDescRingPa);
        if (!adapter->TxRing.pTxDescRingVa) {
                DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

        /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
        et131x_align_allocated_memory(adapter,
                                      &tx_ring->pTxDescRingAdjustedPa,
                                      &tx_ring->TxDescOffset, 0x0FFF);

        tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;
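
        /* A worked example of the alignment above (hypothetical address,
         * assuming et131x_align_allocated_memory() rounds the physical
         * address up to the next 4 KiB boundary and stores the rounding
         * delta in TxDescOffset): if pci_alloc_consistent() returned
         * Pa = 0x12345678, the adjusted Pa becomes 0x12346000 and
         * TxDescOffset = 0x988.  The virtual pointer is then bumped by the
         * same offset so Va and Pa keep referring to the same byte, which
         * is why desc_size over-allocates by 4096 - 1 bytes.
         */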

        /* Allocate memory for the Tx status block */
        tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
                                                    sizeof(TX_STATUS_BLOCK_t),
                                                    &tx_ring->pTxStatusPa);
        if (!adapter->TxRing.pTxStatusVa) {
                DBG_ERROR(et131x_dbginfo,
                          "Cannot alloc memory for Tx status block\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        /* Allocate memory for a dummy buffer */
        tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
                                                      NIC_MIN_PACKET_SIZE,
                                                      &tx_ring->pTxDummyBlkPa);
        if (!adapter->TxRing.pTxDummyBlkVa) {
                DBG_ERROR(et131x_dbginfo,
                          "Cannot alloc memory for Tx dummy buffer\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        DBG_LEAVE(et131x_dbginfo);
        return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
        int desc_size = 0;

        DBG_ENTER(et131x_dbginfo);

        if (adapter->TxRing.pTxDescRingVa) {
                /* Free memory relating to Tx rings here */
                adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

                desc_size =
                    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

                pci_free_consistent(adapter->pdev,
                                    desc_size,
                                    adapter->TxRing.pTxDescRingVa,
                                    adapter->TxRing.pTxDescRingPa);

                adapter->TxRing.pTxDescRingVa = NULL;
        }

        /* Free memory for the Tx status block */
        if (adapter->TxRing.pTxStatusVa) {
                pci_free_consistent(adapter->pdev,
                                    sizeof(TX_STATUS_BLOCK_t),
                                    adapter->TxRing.pTxStatusVa,
                                    adapter->TxRing.pTxStatusPa);

                adapter->TxRing.pTxStatusVa = NULL;
        }

        /* Free memory for the dummy buffer */
        if (adapter->TxRing.pTxDummyBlkVa) {
                pci_free_consistent(adapter->pdev,
                                    NIC_MIN_PACKET_SIZE,
                                    adapter->TxRing.pTxDummyBlkVa,
                                    adapter->TxRing.pTxDummyBlkPa);

                adapter->TxRing.pTxDummyBlkVa = NULL;
        }

        /* Free the memory for MP_TCB structures */
        kfree(adapter->TxRing.MpTcbMem);

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
        struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

        DBG_ENTER(et131x_dbginfo);

        /* Load the hardware with the start of the transmit descriptor ring. */
        writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
               &txdma->pr_base_hi);
        writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
               &txdma->pr_base_lo);

        /* Initialise the transmit DMA engine */
        writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

        /* Load the completion writeback physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        writel(0, &txdma->dma_wb_base_hi);
        writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);

        memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));

        writel(0, &txdma->service_request);
        etdev->TxRing.txDmaReadyToSend = 0;

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
        DBG_ENTER(et131x_dbginfo);

        /* Set up the transmit dma configuration register */
        writel(0x101, &etdev->regs->txdma.csr.value);

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
        DBG_ENTER(et131x_dbginfo);

        if (etdev->RegistryPhyLoopbk) {
                /* TxDMA is disabled for loopback operation. */
                writel(0x101, &etdev->regs->txdma.csr.value);
        } else {
                TXDMA_CSR_t csr = { 0 };

                /* Set up the transmit dma configuration register for normal
                 * operation
                 */
                csr.bits.sngl_epkt_mode = 1;
                csr.bits.halt = 0;
                csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
                writel(csr.value, &etdev->regs->txdma.csr.value);
        }

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
        PMP_TCB pMpTcb;
        uint32_t TcbCount;
        TX_RING_t *tx_ring;

        DBG_ENTER(et131x_dbginfo);

        /* Setup some convenience pointers */
        tx_ring = &adapter->TxRing;
        pMpTcb = adapter->TxRing.MpTcbMem;

        tx_ring->TCBReadyQueueHead = pMpTcb;

        /* Go through and set up each TCB */
        for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
                memset(pMpTcb, 0, sizeof(MP_TCB));

                /* Set the link pointer in HW TCB to the next TCB in the
                 * chain.  If this is the last TCB in the chain, also set the
                 * tail pointer.
                 */
                if (TcbCount < NUM_TCB - 1) {
                        pMpTcb->Next = pMpTcb + 1;
                } else {
                        tx_ring->TCBReadyQueueTail = pMpTcb;
                        pMpTcb->Next = (PMP_TCB) NULL;
                }

                pMpTcb++;
        }

        /* Curr send queue should now be empty */
        tx_ring->CurrSendHead = (PMP_TCB) NULL;
        tx_ring->CurrSendTail = (PMP_TCB) NULL;

        INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);

        DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Returns 0 in almost all cases; a non-zero value only on extreme hard failure
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
        int status = 0;
        struct et131x_adapter *etdev = NULL;

        DBG_TX_ENTER(et131x_dbginfo);

        etdev = netdev_priv(netdev);

        /* Send these packets
         *
         * NOTE: The Linux Tx entry point is only given one packet at a time
         * to Tx, so the PacketCount value and its associated array make no
         * sense here
         */

        /* Queue is not empty or TCB is not available */
        if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
            MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
                /* NOTE: If there's an error on send, no need to queue the
                 * packet under Linux; if we just send an error up to the
                 * netif layer, it will resend the skb to us.
                 */
                DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
                status = -ENOMEM;
        } else {
                /* We need to see if the link is up; if it's not, make the
                 * netif layer think we're good and drop the packet
                 */
                /*
                 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
                 *  etdev->DriverNoPhyAccess )
                 */
                if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
                    || !netif_carrier_ok(netdev)) {
                        DBG_VERBOSE(et131x_dbginfo,
                                "Can't Tx, Link is DOWN; drop the packet\n");

                        dev_kfree_skb_any(skb);
                        skb = NULL;

                        etdev->net_stats.tx_dropped++;
                } else {
                        status = et131x_send_packet(skb, etdev);

                        if (status == -ENOMEM) {

                                /* NOTE: If there's an error on send, no need
                                 * to queue the packet under Linux; if we just
                                 * send an error up to the netif layer, it
                                 * will resend the skb to us.
                                 */
                                DBG_WARNING(et131x_dbginfo,
                                            "Resources problem, Queue tx packet\n");
                        } else if (status != 0) {
                                /* On any other error, make netif think we're
                                 * OK and drop the packet
                                 */
                                DBG_WARNING(et131x_dbginfo,
                                            "General error, drop packet\n");

                                dev_kfree_skb_any(skb);
                                skb = NULL;

                                etdev->net_stats.tx_dropped++;
                        }
                }
        }

        DBG_TX_LEAVE(et131x_dbginfo);
        return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Returns 0 in almost all cases; a non-zero value only on extreme hard failure.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev)
{
        int status = 0;
        PMP_TCB pMpTcb = NULL;
        uint16_t *shbufva;
        unsigned long flags;

        DBG_TX_ENTER(et131x_dbginfo);

        /* Is our buffer scattered, or contiguous? */
        if (skb_shinfo(skb)->nr_frags == 0) {
                DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
        } else {
                DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
                       skb_shinfo(skb)->nr_frags);
        }

        /* All packets must have at least a MAC address and a protocol type */
        if (skb->len < ETH_HLEN) {
                DBG_ERROR(et131x_dbginfo,
                          "Packet size < ETH_HLEN (14 bytes)\n");
                DBG_TX_LEAVE(et131x_dbginfo);
                return -EIO;
        }

        /* Get a TCB for this packet */
        spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

        pMpTcb = etdev->TxRing.TCBReadyQueueHead;

        if (pMpTcb == NULL) {
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

                DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
                DBG_TX_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }

        etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

        if (etdev->TxRing.TCBReadyQueueHead == NULL)
                etdev->TxRing.TCBReadyQueueTail = NULL;

        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

        pMpTcb->PacketLength = skb->len;
        pMpTcb->Packet = skb;

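        /* Peek at the destination MAC address (the first six bytes of the
         * frame, read below as three 16-bit words) purely for statistics:
         * ff:ff:ff:ff:ff:ff is a broadcast frame, and a first octet with the
         * group bit set (and, as this mask reads, the locally administered
         * bit clear) is counted as multicast.  The fMP_DEST_BROAD and
         * fMP_DEST_MULTI flags set here feed the brdcstxmt/multixmt counters
         * in et131x_free_send_packet() when the packet completes.
         */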
        if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
                shbufva = (uint16_t *) skb->data;

                if ((shbufva[0] == 0xffff) &&
                    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
                        MP_SET_FLAG(pMpTcb, fMP_DEST_BROAD);
                } else if ((shbufva[0] & 0x3) == 0x0001) {
                        MP_SET_FLAG(pMpTcb, fMP_DEST_MULTI);
                }
        }

        pMpTcb->Next = NULL;

        /* Call the NIC specific send handler. */
        if (status == 0)
                status = nic_send_packet(etdev, pMpTcb);

        if (status != 0) {
                spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

                if (etdev->TxRing.TCBReadyQueueTail) {
                        etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
                } else {
                        /* Apparently ready Q is empty. */
                        etdev->TxRing.TCBReadyQueueHead = pMpTcb;
                }

                etdev->TxRing.TCBReadyQueueTail = pMpTcb;

                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

                DBG_TX_LEAVE(et131x_dbginfo);
                return status;
        }

        DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
        uint32_t loopIndex;
        TX_DESC_ENTRY_t CurDesc[24];
        uint32_t FragmentNumber = 0;
        uint32_t thiscopy, remainder;
        struct sk_buff *pPacket = pMpTcb->Packet;
        uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
        unsigned long flags;

        DBG_TX_ENTER(et131x_dbginfo);

        /* The optimizations of this send routine restrict us to sending 24
         * fragments per call.  In practice we should never see more than 5
         * fragments.
         *
         * NOTE: The older version of this function (below) can handle any
         * number of fragments. If needed, we can call that function,
         * although it is less efficient.
         */
        if (FragListCount > 23) {
                DBG_TX_LEAVE(et131x_dbginfo);
                return -EIO;
        }

        memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));

        for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
                /* If there is something in this element, lets get a
                 * descriptor from the ring and get the necessary data
                 */
                if (loopIndex == 0) {
                        /* If the fragments are smaller than a standard MTU,
                         * then map them to a single descriptor in the Tx
                         * Desc ring. However, if they're larger, as is
                         * possible with support for jumbo packets, then
                         * split them each across 2 descriptors.
                         *
                         * This will work until we determine why the hardware
                         * doesn't seem to like large fragments.
                         */
                        if ((pPacket->len - pPacket->data_len) <= 1514) {
                                DBG_TX(et131x_dbginfo,
                                       "Got packet of length %d, "
                                       "filling desc entry %d, "
                                       "TCB: 0x%p\n",
                                       (pPacket->len - pPacket->data_len),
                                       etdev->TxRing.txDmaReadyToSend, pMpTcb);

                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    pPacket->len - pPacket->data_len;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   pPacket->len -
                                                   pPacket->data_len,
                                                   PCI_DMA_TODEVICE);
                        } else {
                                DBG_TX(et131x_dbginfo,
                                       "Got packet of length %d, "
                                       "filling desc entry %d, "
                                       "TCB: 0x%p\n",
                                       (pPacket->len - pPacket->data_len),
                                       etdev->TxRing.txDmaReadyToSend, pMpTcb);

                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data +
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                        }
                } else {
                        DBG_TX(et131x_dbginfo,
                               "Got packet of length %d, "
                               "filling desc entry %d, "
                               "TCB: 0x%p\n",
                               pFragList[loopIndex - 1].size,
                               etdev->TxRing.txDmaReadyToSend,
                               pMpTcb);

                        CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                        CurDesc[FragmentNumber].word2.bits.length_in_bytes =
                            pFragList[loopIndex - 1].size;

                        /* NOTE: Here, the dma_addr_t returned from
                         * pci_map_page() is implicitly cast as a uint32_t.
                         * Although dma_addr_t can be 64-bit, the address
                         * returned by pci_map_page() is always 32-bit
                         * addressable (as defined by the pci/dma subsystem)
                         */
                        CurDesc[FragmentNumber++].DataBufferPtrLow =
                            pci_map_page(etdev->pdev,
                                         pFragList[loopIndex - 1].page,
                                         pFragList[loopIndex - 1].page_offset,
                                         pFragList[loopIndex - 1].size,
                                         PCI_DMA_TODEVICE);
                }
        }

        if (FragmentNumber == 0) {
                DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
                return -EIO;
        }

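        /* Pick the completion style for the last descriptor.  From how the
         * values are used here, word3 = 0x1 seemingly marks "last descriptor
         * of the packet" and 0x5 additionally requests a completion
         * interrupt; at gigabit speed the interrupt bit is set only every
         * PARM_TX_NUM_BUFS_DEF packets, which is the Tx interrupt coalescing
         * referred to near the watchdog write below (an inference from this
         * code, not from hardware documentation).
         */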
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
                    PARM_TX_NUM_BUFS_DEF) {
                        CurDesc[FragmentNumber - 1].word3.value = 0x5;
                        etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
                } else {
                        CurDesc[FragmentNumber - 1].word3.value = 0x1;
                }
        } else {
                CurDesc[FragmentNumber - 1].word3.value = 0x5;
        }

        CurDesc[0].word3.bits.f = 1;

        pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
        pMpTcb->PacketStaleCount = 0;

        spin_lock_irqsave(&etdev->SendHWLock, flags);

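        /* txDmaReadyToSend is a 10-bit ring index with a wrap toggle in the
         * bit above it (an assumption drawn from the INDEX10(), add_10bit(),
         * ET_DMA10_MASK and ET_DMA10_WRAP usage below): INDEX10() extracts
         * the descriptor index, add_10bit() advances it, and the wrap bit is
         * flipped each time the index passes the end of the ring so the
         * hardware can distinguish a full ring from an empty one.
         */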
        thiscopy = NUM_DESC_PER_RING_TX -
                                INDEX10(etdev->TxRing.txDmaReadyToSend);

        if (thiscopy >= FragmentNumber) {
                remainder = 0;
                thiscopy = FragmentNumber;
        } else {
                remainder = FragmentNumber - thiscopy;
        }

        memcpy(etdev->TxRing.pTxDescRingVa +
               INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
               sizeof(TX_DESC_ENTRY_t) * thiscopy);

        add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);

        if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0 ||
            INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
                etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
                etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
        }

        if (remainder) {
                memcpy(etdev->TxRing.pTxDescRingVa,
                       CurDesc + thiscopy,
                       sizeof(TX_DESC_ENTRY_t) * remainder);

                add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
        }

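        /* Record where this packet's descriptors end.  WrIndex (the last
         * descriptor written) together with WrIndexStart (saved above) is
         * later used by et131x_free_send_packet() to walk and unmap exactly
         * the descriptors belonging to this packet.
         */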
        if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
                if (etdev->TxRing.txDmaReadyToSend)
                        pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
                else
                        pMpTcb->WrIndex =
                            ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
        } else
                pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;

        spin_lock(&etdev->TCBSendQLock);

        if (etdev->TxRing.CurrSendTail)
                etdev->TxRing.CurrSendTail->Next = pMpTcb;
        else
                etdev->TxRing.CurrSendHead = pMpTcb;

        etdev->TxRing.CurrSendTail = pMpTcb;

        DBG_ASSERT(pMpTcb->Next == NULL);

        etdev->TxRing.nBusySend++;

        spin_unlock(&etdev->TCBSendQLock);

        /* Write the new write pointer back to the device. */
        writel(etdev->TxRing.txDmaReadyToSend,
               &etdev->regs->txdma.service_request);

        /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }

        spin_unlock_irqrestore(&etdev->SendHWLock, flags);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

/*
 * NOTE: For now, keep this older version of NICSendPacket around for
 * reference, even though it's not used
 */
#if 0

/**
 * nic_send_packet - NIC specific send handler.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 on success, errno on failure.
 *
 * This version of the send routine is designed for version A silicon.
 * Assumption - Send spinlock has been acquired.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
        uint32_t loopIndex, fragIndex, loopEnd;
        uint32_t splitfirstelem = 0;
        uint32_t SegmentSize = 0;
        TX_DESC_ENTRY_t CurDesc;
        TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
        uint32_t SlotsAvailable;
        DMA10W_t ServiceComplete;
        unsigned long flags;
        struct sk_buff *pPacket = pMpTcb->Packet;
        uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];

        DBG_TX_ENTER(et131x_dbginfo);

        ServiceComplete.value =
                readl(&etdev->regs->txdma.NewServiceComplete.value);

        /*
         * Attempt to fix TWO hardware bugs:
         * 1)  NEVER write an odd number of descriptors.
         * 2)  If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
         *     packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
         *     descriptor IN HALF DUPLEX MODE ONLY
         * NOTE that (2) interacts with (1).  If the packet is less than
         * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
         * Therefore if it is even now, it will eventually end up odd, and
         * so will need adjusting.
         *
         * VLAN tags get involved since VLAN tags add another one or two
         * segments.
         */
        DBG_TX(et131x_dbginfo,
               "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);

        if ((etdev->duplex_mode == 0)
            && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
                DBG_TX(et131x_dbginfo,
                       "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
                if ((FragListCount & 0x1) == 0) {
                        DBG_TX(et131x_dbginfo,
                               "Even number of descs, split 1st elem\n");
                        splitfirstelem = 1;
                        /* SegmentSize = pFragList[0].size / 2; */
                        SegmentSize = (pPacket->len - pPacket->data_len) / 2;
                }
        } else if (FragListCount & 0x1) {
                DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");

                splitfirstelem = 1;
                /* SegmentSize = pFragList[0].size / 2; */
                SegmentSize = (pPacket->len - pPacket->data_len) / 2;
        }

        spin_lock_irqsave(&etdev->SendHWLock, flags);

        if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
            ServiceComplete.bits.serv_cpl_wrap) {
                /* The ring hasn't wrapped.  Slots available should be
                 * (RING_SIZE) - the difference between the two pointers.
                 */
                SlotsAvailable = NUM_DESC_PER_RING_TX -
                    (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
                     ServiceComplete.bits.serv_cpl);
        } else {
                /* The ring has wrapped.  Slots available should be the
                 * difference between the two pointers.
                 */
                SlotsAvailable = ServiceComplete.bits.serv_cpl -
                    etdev->TxRing.txDmaReadyToSend.bits.serv_req;
        }

        if ((FragListCount + splitfirstelem) > SlotsAvailable) {
                DBG_WARNING(et131x_dbginfo,
                            "Not Enough Space in Tx Desc Ring\n");
                spin_unlock_irqrestore(&etdev->SendHWLock, flags);
                return -ENOMEM;
        }

        loopEnd = FragListCount + splitfirstelem;
        fragIndex = 0;

        DBG_TX(et131x_dbginfo,
               "TCB           : 0x%p\n"
               "Packet (SKB)  : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
               "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
               pMpTcb,
               pPacket, pPacket->len, pPacket->data_len,
               FragListCount, splitfirstelem, loopEnd);

        for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
                if (loopIndex > splitfirstelem)
                        fragIndex++;

                DBG_TX(et131x_dbginfo,
                       "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
                       fragIndex);

                /* If there is something in this element, let's get a
                 * descriptor from the ring and get the necessary data
                 */
                DBG_TX(et131x_dbginfo,
                       "Packet Length %d, "
                       "filling desc entry %d\n",
                       pPacket->len,
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req);

                /*
                 * NOTE - Should we do a paranoia check here to make sure the
                 * fragment actually has a length? It's HIGHLY unlikely the
                 * fragment would contain no data...
                 */
                if (1) {
                        /* NOTE - Currently always getting 32-bit addrs, and
                         * dma_addr_t is only 32-bit, so leave "high" ptr
                         * value out for now
                         * CurDesc.DataBufferPtrHigh = 0;
                         */

                        CurDesc.word2.value = 0;
                        CurDesc.word3.value = 0;

                        if (fragIndex == 0) {
                                if (splitfirstelem) {
                                        DBG_TX(et131x_dbginfo,
                                               "Split first element: YES\n");

                                        if (loopIndex == 0) {
                                                DBG_TX(et131x_dbginfo,
                                                       "Got fragment of length %d, fragIndex: %d\n",
                                                       pPacket->len -
                                                       pPacket->data_len,
                                                       fragIndex);
                                                DBG_TX(et131x_dbginfo,
                                                       "SegmentSize: %d\n",
                                                       SegmentSize);

                                                CurDesc.word2.bits.length_in_bytes =
                                                    SegmentSize;
                                                CurDesc.DataBufferPtrLow =
                                                    pci_map_single(etdev->pdev,
                                                                   pPacket->data,
                                                                   SegmentSize,
                                                                   PCI_DMA_TODEVICE);
                                                DBG_TX(et131x_dbginfo,
                                                       "pci_map_single() returns: 0x%08x\n",
                                                       CurDesc.DataBufferPtrLow);
                                        } else {
                                                DBG_TX(et131x_dbginfo,
                                                       "Got fragment of length %d, fragIndex: %d\n",
                                                       pPacket->len -
                                                       pPacket->data_len,
                                                       fragIndex);
                                                DBG_TX(et131x_dbginfo,
                                                       "Leftover Size: %d\n",
                                                       (pPacket->len -
                                                        pPacket->data_len -
                                                        SegmentSize));

                                                CurDesc.word2.bits.length_in_bytes =
                                                    (pPacket->len -
                                                     pPacket->data_len) -
                                                    SegmentSize;
                                                CurDesc.DataBufferPtrLow =
                                                    pci_map_single(etdev->pdev,
                                                                   pPacket->data +
                                                                   SegmentSize,
                                                                   pPacket->len -
                                                                   pPacket->data_len -
                                                                   SegmentSize,
                                                                   PCI_DMA_TODEVICE);
                                                DBG_TX(et131x_dbginfo,
                                                       "pci_map_single() returns: 0x%08x\n",
                                                       CurDesc.DataBufferPtrLow);
                                        }
                                } else {
                                        DBG_TX(et131x_dbginfo,
                                               "Split first element: NO\n");

                                        CurDesc.word2.bits.length_in_bytes =
                                            pPacket->len - pPacket->data_len;

                                        CurDesc.DataBufferPtrLow =
                                            pci_map_single(etdev->pdev,
                                                           pPacket->data,
                                                           pPacket->len -
                                                           pPacket->data_len,
                                                           PCI_DMA_TODEVICE);
                                        DBG_TX(et131x_dbginfo,
                                               "pci_map_single() returns: 0x%08x\n",
                                               CurDesc.DataBufferPtrLow);
                                }
                        } else {
                                CurDesc.word2.bits.length_in_bytes =
                                    pFragList[fragIndex - 1].size;
                                CurDesc.DataBufferPtrLow =
                                    pci_map_page(etdev->pdev,
                                                 pFragList[fragIndex - 1].page,
                                                 pFragList[fragIndex - 1].page_offset,
                                                 pFragList[fragIndex - 1].size,
                                                 PCI_DMA_TODEVICE);
                                DBG_TX(et131x_dbginfo,
                                       "pci_map_page() returns: 0x%08x\n",
                                       CurDesc.DataBufferPtrLow);
                        }

                        if (loopIndex == 0) {
                                /* This is the first descriptor of the packet
                                 *
                                 * Set the "f" bit to indicate this is the
                                 * first descriptor in the packet.
                                 */
                                DBG_TX(et131x_dbginfo,
                                       "This is our FIRST descriptor\n");
                                CurDesc.word3.bits.f = 1;

                                pMpTcb->WrIndexStart =
                                    etdev->TxRing.txDmaReadyToSend;
                        }

                        if ((loopIndex == (loopEnd - 1)) &&
                            (etdev->duplex_mode ||
                             (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
                                /* This is the last descriptor of the packet */
                                DBG_TX(et131x_dbginfo,
                                       "THIS is our LAST descriptor\n");

                                if (etdev->linkspeed ==
                                    TRUEPHY_SPEED_1000MBPS) {
                                        if (++etdev->TxRing.
                                            TxPacketsSinceLastinterrupt >=
                                            PARM_TX_NUM_BUFS_DEF) {
                                                CurDesc.word3.value = 0x5;
                                                etdev->TxRing.
                                                    TxPacketsSinceLastinterrupt = 0;
                                        } else {
                                                CurDesc.word3.value = 0x1;
                                        }
                                } else {
                                        CurDesc.word3.value = 0x5;
                                }

                                /* Following index will be used during freeing
                                 * of packet
                                 */
                                pMpTcb->WrIndex =
                                    etdev->TxRing.txDmaReadyToSend;
                                pMpTcb->PacketStaleCount = 0;
                        }

                        /* Copy the descriptor (filled above) into the
                         * descriptor ring at the next free entry.  Advance
                         * the "next free entry" variable
                         */
                        memcpy(etdev->TxRing.pTxDescRingVa +
                               etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                               &CurDesc, sizeof(TX_DESC_ENTRY_t));

                        CurDescPostCopy =
                            etdev->TxRing.pTxDescRingVa +
                            etdev->TxRing.txDmaReadyToSend.bits.serv_req;

                        DBG_TX(et131x_dbginfo,
                               "CURRENT DESCRIPTOR\n"
                               "\tAddress           : 0x%p\n"
                               "\tDataBufferPtrHigh : 0x%08x\n"
                               "\tDataBufferPtrLow  : 0x%08x\n"
                               "\tword2             : 0x%08x\n"
                               "\tword3             : 0x%08x\n",
                               CurDescPostCopy,
                               CurDescPostCopy->DataBufferPtrHigh,
                               CurDescPostCopy->DataBufferPtrLow,
                               CurDescPostCopy->word2.value,
                               CurDescPostCopy->word3.value);

                        if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
                            NUM_DESC_PER_RING_TX) {
                                if (etdev->TxRing.txDmaReadyToSend.bits.
                                    serv_req_wrap) {
                                        etdev->TxRing.txDmaReadyToSend.value = 0;
                                } else {
                                        etdev->TxRing.txDmaReadyToSend.value = 0x400;
                                }
                        }
                }
        }

        if (etdev->duplex_mode == 0 &&
            pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
                /* NOTE - Same 32/64-bit issue as above... */
                CurDesc.DataBufferPtrHigh = 0x0;
                CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
                CurDesc.word2.value = 0;

                if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                        if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
                            PARM_TX_NUM_BUFS_DEF) {
                                CurDesc.word3.value = 0x5;
                                etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
                        } else {
                                CurDesc.word3.value = 0x1;
                        }
                } else {
                        CurDesc.word3.value = 0x5;
                }

                CurDesc.word2.bits.length_in_bytes =
                    NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;

                pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;

                memcpy(etdev->TxRing.pTxDescRingVa +
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                       &CurDesc, sizeof(TX_DESC_ENTRY_t));

                CurDescPostCopy =
                    etdev->TxRing.pTxDescRingVa +
                    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

                DBG_TX(et131x_dbginfo,
                       "CURRENT DESCRIPTOR\n"
                       "\tAddress           : 0x%p\n"
                       "\tDataBufferPtrHigh : 0x%08x\n"
                       "\tDataBufferPtrLow  : 0x%08x\n"
                       "\tword2             : 0x%08x\n"
                       "\tword3             : 0x%08x\n",
                       CurDescPostCopy,
                       CurDescPostCopy->DataBufferPtrHigh,
                       CurDescPostCopy->DataBufferPtrLow,
                       CurDescPostCopy->word2.value,
                       CurDescPostCopy->word3.value);

                if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
                    NUM_DESC_PER_RING_TX) {
                        if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap)
                                etdev->TxRing.txDmaReadyToSend.value = 0;
                        else
                                etdev->TxRing.txDmaReadyToSend.value = 0x400;
                }

                DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
                       /* etdev->TxRing.txDmaReadyToSend.value, */
                       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
                       NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
        }

        spin_lock(&etdev->TCBSendQLock);

        if (etdev->TxRing.CurrSendTail)
                etdev->TxRing.CurrSendTail->Next = pMpTcb;
        else
                etdev->TxRing.CurrSendHead = pMpTcb;

        etdev->TxRing.CurrSendTail = pMpTcb;

        DBG_ASSERT(pMpTcb->Next == NULL);

        etdev->TxRing.nBusySend++;

        spin_unlock(&etdev->TCBSendQLock);

        /* Write the new write pointer back to the device. */
        writel(etdev->TxRing.txDmaReadyToSend.value,
               &etdev->regs->txdma.service_request.value);

#ifdef CONFIG_ET131X_DEBUG
        DumpDeviceBlock(DBG_TX_ON, etdev, 1);
#endif

        /* For Gig only, we use Tx Interrupt coalescing.  Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }

        spin_unlock_irqrestore(&etdev->SendHWLock, flags);

        DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
}

#endif

/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                           PMP_TCB pMpTcb)
{
1231         unsigned long flags;
1232         TX_DESC_ENTRY_t *desc = NULL;
1233         struct net_device_stats *stats = &etdev->net_stats;
1234
1235         if (MP_TEST_FLAG(pMpTcb, fMP_DEST_BROAD))
1236                 atomic_inc(&etdev->Stats.brdcstxmt);
1237         else if (MP_TEST_FLAG(pMpTcb, fMP_DEST_MULTI))
1238                 atomic_inc(&etdev->Stats.multixmt);
1239         else
1240                 atomic_inc(&etdev->Stats.unixmt);
1241
1242         if (pMpTcb->Packet) {
1243                 stats->tx_bytes += pMpTcb->Packet->len;
1244
1245                 /* Iterate through the TX descriptors on the ring
1246                  * corresponding to this packet and unmap the fragments
1247                  * they point to
1248                  */
1249                 DBG_TX(et131x_dbginfo,
1250                        "Unmap descriptors Here\n"
1251                        "TCB                  : 0x%p\n"
1252                        "TCB Next             : 0x%p\n"
1253                        "TCB PacketLength     : %d\n"
1254                        "TCB WrIndexStart     : 0x%08x\n"
1255                        "TCB WrIndex          : 0x%08x\n",
1256                        pMpTcb,
1257                        pMpTcb->Next,
1258                        pMpTcb->PacketLength,
1259                        pMpTcb->WrIndexStart,
1260                        pMpTcb->WrIndex);
1261
1262                 do {
1263                         desc =
1264                             (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
1265                                 INDEX10(pMpTcb->WrIndexStart));
1266
1267                         DBG_TX(et131x_dbginfo,
1268                                "CURRENT DESCRIPTOR\n"
1269                                "\tAddress           : 0x%p\n"
1270                                "\tDataBufferPtrHigh : 0x%08x\n"
1271                                "\tDataBufferPtrLow  : 0x%08x\n"
1272                                "\tword2             : 0x%08x\n"
1273                                "\tword3             : 0x%08x\n",
1274                                desc,
1275                                desc->DataBufferPtrHigh,
1276                                desc->DataBufferPtrLow,
1277                                desc->word2.value,
1278                                desc->word3.value);
1279
1280                         pci_unmap_single(etdev->pdev,
1281                                          desc->DataBufferPtrLow,
1282                                          desc->word2.value, PCI_DMA_TODEVICE);
1283
1284                         add_10bit(&pMpTcb->WrIndexStart, 1);
1285                         if (INDEX10(pMpTcb->WrIndexStart) >=
1286                             NUM_DESC_PER_RING_TX) {
1287                                 pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
1288                                 pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
1289                         }
1290                 } while (desc != (etdev->TxRing.pTxDescRingVa +
1291                                 INDEX10(pMpTcb->WrIndex)));
1292
1293                 DBG_TX(et131x_dbginfo,
1294                        "Free Packet (SKB)   : 0x%p\n", pMpTcb->Packet);
1295
1296                 dev_kfree_skb_any(pMpTcb->Packet);
1297         }
1298
1299         memset(pMpTcb, 0, sizeof(MP_TCB));
1300
1301         /* Add the TCB to the Ready Q */
1302         spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
1303
1304         etdev->Stats.opackets++;
1305
1306         if (etdev->TxRing.TCBReadyQueueTail) {
1307                 etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
1308         } else {
1309                         /* Ready Q was empty; this TCB becomes the head */
1310                 etdev->TxRing.TCBReadyQueueHead = pMpTcb;
1311         }
1312
1313         etdev->TxRing.TCBReadyQueueTail = pMpTcb;
1314
1315         spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
1316
1317         DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
1318 }
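
/*
 * Callers of et131x_free_send_packet() follow the pattern visible in
 * et131x_free_busy_send_packets() and et131x_update_tcb_list() below:
 * unlink the TCB from the send list while holding TCBSendQLock, drop that
 * lock, then recycle the TCB (this routine takes TCBReadyQLock itself).
 * A condensed sketch:
 *
 *	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 *	pMpTcb = etdev->TxRing.CurrSendHead;
 *	etdev->TxRing.CurrSendHead = pMpTcb->Next;
 *	etdev->TxRing.nBusySend--;
 *	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 *	et131x_free_send_packet(etdev, pMpTcb);
 */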
1319
1320 /**
1321  * et131x_free_busy_send_packets - Free and complete the stopped active sends
1322  * @etdev: pointer to our adapter
1323  *
1324  * Assumption - no send locks are held on entry; they are acquired here
1325  */
1326 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
1327 {
1328         PMP_TCB pMpTcb;
1329         struct list_head *entry;
1330         unsigned long flags;
1331         u32 FreeCounter = 0;
1332
1333         DBG_ENTER(et131x_dbginfo);
1334
1335         while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
1336                 spin_lock_irqsave(&etdev->SendWaitLock, flags);
1337                 /* Unlink the entry or this loop never terminates */
1338                 entry = etdev->TxRing.SendWaitQueue.next;
1339                 list_del(entry);
1340                 etdev->TxRing.nWaitSend--;
1341                 spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
1342         }
1343
1344         etdev->TxRing.nWaitSend = 0;
1345
1346         /* Any packets being sent? Check the first TCB on the send list */
1347         spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1348
1349         pMpTcb = etdev->TxRing.CurrSendHead;
1350
1351         while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
1352                 PMP_TCB pNext = pMpTcb->Next;
1353
1354                 etdev->TxRing.CurrSendHead = pNext;
1355
1356                 if (pNext == NULL)
1357                         etdev->TxRing.CurrSendTail = NULL;
1358
1359                 etdev->TxRing.nBusySend--;
1360
1361                 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1362
1363                 DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
1364
1365                 FreeCounter++;
1366                 et131x_free_send_packet(etdev, pMpTcb);
1367
1368                 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1369
1370                 pMpTcb = etdev->TxRing.CurrSendHead;
1371         }
1372
1373         if (FreeCounter == NUM_TCB) {
1374                 DBG_ERROR(et131x_dbginfo,
1375                     "et131x_free_busy_send_packets freed NUM_TCB TCBs without draining the send list\n");
1376                 BUG();
1377         }
1378
1379         spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1380
1381         etdev->TxRing.nBusySend = 0;
1382
1383         DBG_LEAVE(et131x_dbginfo);
1384 }
1385
1386 /**
1387  * et131x_handle_send_interrupt - Interrupt handler for sending processing
1388  * @etdev: pointer to our adapter
1389  *
1390  * Re-claim the send resources, complete sends and get more to send from
1391  * the send wait queue.
1392  *
1393  * Assumption - no send locks are held on entry; the helpers take their own
1394  */
1395 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
1396 {
1397         DBG_TX_ENTER(et131x_dbginfo);
1398
1399         /* Mark as completed any packets which have been sent by the device. */
1400         et131x_update_tcb_list(etdev);
1401
1402         /* If we queued any transmits because we didn't have any TCBs earlier,
1403          * dequeue and send those packets now, as long as we have free TCBs.
1404          */
1405         et131x_check_send_wait_list(etdev);
1406
1407         DBG_TX_LEAVE(et131x_dbginfo);
1408 }
1409
1410 /**
1411  * et131x_update_tcb_list - Helper routine for Send Interrupt handler
1412  * @etdev: pointer to our adapter
1413  *
1414  * Re-claims the send resources and completes sends.  Can also be called as
1415  * part of the NIC send routine when the "ServiceComplete" indication has
1416  * wrapped.
1417  */
1418 static void et131x_update_tcb_list(struct et131x_adapter *etdev)
1419 {
1420         unsigned long flags;
1421         u32 ServiceComplete;
1422         PMP_TCB pMpTcb;
1423         u32 index;
1424
1425         ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
1426         index = INDEX10(ServiceComplete);
1427
1428         /* Has the ring wrapped?  Process any descriptors that do not have
1429          * the same "wrap" indicator as the current completion indicator
1430          */
1431         spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1432
1433         pMpTcb = etdev->TxRing.CurrSendHead;
1434
1435         while (pMpTcb &&
1436                ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
1437                index < INDEX10(pMpTcb->WrIndex)) {
1438                 etdev->TxRing.nBusySend--;
1439                 etdev->TxRing.CurrSendHead = pMpTcb->Next;
1440                 if (pMpTcb->Next == NULL)
1441                         etdev->TxRing.CurrSendTail = NULL;
1442
1443                 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1444                 et131x_free_send_packet(etdev, pMpTcb);
1445                 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1446
1447                 /* Go to the next packet */
1448                 pMpTcb = etdev->TxRing.CurrSendHead;
1449         }
1450         while (pMpTcb &&
1451                !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
1452                && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
1453                 etdev->TxRing.nBusySend--;
1454                 etdev->TxRing.CurrSendHead = pMpTcb->Next;
1455                 if (pMpTcb->Next == NULL)
1456                         etdev->TxRing.CurrSendTail = NULL;
1457
1458                 spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1459                 et131x_free_send_packet(etdev, pMpTcb);
1460                 spin_lock_irqsave(&etdev->TCBSendQLock, flags);
1461
1462                 /* Go to the next packet */
1463                 pMpTcb = etdev->TxRing.CurrSendHead;
1464         }
1465
1466         /* Wake up the queue when we hit a low-water mark */
1467         if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
1468                 netif_wake_queue(etdev->netdev);
1469
1470         spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
1471 }
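
/*
 * The two loops above apply a single completion test; an illustrative
 * helper (not part of the driver) makes the predicate explicit:
 *
 *	static inline int tcb_complete(u32 hw, u32 wr)
 *	{
 *		if ((hw ^ wr) & ET_DMA10_WRAP)	// hw wrapped past this TCB
 *			return INDEX10(hw) < INDEX10(wr);
 *		return INDEX10(hw) > INDEX10(wr); // same pass: hw moved beyond
 *	}
 *
 * That is, a TCB is finished once the device's service-complete pointer has
 * advanced past the TCB's last descriptor, with the wrap bit resolving the
 * case where the raw offsets alone would be ambiguous.
 */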
1472
1473 /**
1474  * et131x_check_send_wait_list - Helper routine for the interrupt handler
1475  * @etdev: pointer to our adapter
1476  *
1477  * Takes packets from the send wait queue and posts them to the device (if
1478  * room available).
1479  */
1480 static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
1481 {
1482         unsigned long flags;
1483
1484         spin_lock_irqsave(&etdev->SendWaitLock, flags);
1485
1486         while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
1487                                 MP_TCB_RESOURCES_AVAILABLE(etdev)) {
1488                 struct list_head *entry;
1489
1490                 DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");
1491                 /* Unlink the entry or this loop never terminates; the
1492                  * dequeued packet is not re-posted here (FIXME) */
1493                 entry = etdev->TxRing.SendWaitQueue.next;
1494                 list_del(entry);
1495                 etdev->TxRing.nWaitSend--;
1496                 DBG_WARNING(et131x_dbginfo,
1497                     "et131x_check_send_wait_list - dequeued pkt. Waiting %d\n",
1498                                 etdev->TxRing.nWaitSend);
1499         }
1500
1501         spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
1502 }
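
/*
 * For reference, MP_TCB_RESOURCES_AVAILABLE() above amounts to a check of
 * the form below (an assumption about this driver's headers; see
 * et131x_adapter.h):
 *
 *	etdev->TxRing.nBusySend < NUM_TCB
 *
 * so the wait queue only drains while free TCBs remain to carry packets.
 */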