Staging: et131x: Kill off the rxdma type
[safe/jmp/linux-2.6] / drivers / staging / et131x / et1310_rx.c
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
89
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
92
93 #include "et1310_rx.h"
94
95
96 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
97
98 /**
99  * et131x_rx_dma_memory_alloc
100  * @adapter: pointer to our private adapter structure
101  *
102  * Returns 0 on success and errno on failure (as defined in errno.h)
103  *
104  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
105  * and the Packet Status Ring.
106  */
107 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
108 {
109         u32 i, j;
110         u32 bufsize;
111         u32 pktStatRingSize, FBRChunkSize;
112         RX_RING_t *rx_ring;
113
114         /* Setup some convenience pointers */
115         rx_ring = (RX_RING_t *) &adapter->RxRing;
116
117         /* Alloc memory for the lookup table */
118 #ifdef USE_FBR0
119         rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
120 #endif
121
122         rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
123
124         /* The first thing we will do is configure the sizes of the buffer
125          * rings. These will change based on jumbo packet support.  Larger
126          * jumbo packets increases the size of each entry in FBR0, and the
127          * number of entries in FBR0, while at the same time decreasing the
128          * number of entries in FBR1.
129          *
130          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
131          * entries are huge in order to accomodate a "jumbo" frame, then it
132          * will have less entries.  Conversely, FBR1 will now be relied upon
133          * to carry more "normal" frames, thus it's entry size also increases
134          * and the number of entries goes up too (since it now carries
135          * "small" + "regular" packets.
136          *
137          * In this scheme, we try to maintain 512 entries between the two
138          * rings. Also, FBR1 remains a constant size - when it's size doubles
139          * the number of entries halves.  FBR0 increases in size, however.
140          */
141
142         if (adapter->RegistryJumboPacket < 2048) {
143 #ifdef USE_FBR0
144                 rx_ring->Fbr0BufferSize = 256;
145                 rx_ring->Fbr0NumEntries = 512;
146 #endif
147                 rx_ring->Fbr1BufferSize = 2048;
148                 rx_ring->Fbr1NumEntries = 512;
149         } else if (adapter->RegistryJumboPacket < 4096) {
150 #ifdef USE_FBR0
151                 rx_ring->Fbr0BufferSize = 512;
152                 rx_ring->Fbr0NumEntries = 1024;
153 #endif
154                 rx_ring->Fbr1BufferSize = 4096;
155                 rx_ring->Fbr1NumEntries = 512;
156         } else {
157 #ifdef USE_FBR0
158                 rx_ring->Fbr0BufferSize = 1024;
159                 rx_ring->Fbr0NumEntries = 768;
160 #endif
161                 rx_ring->Fbr1BufferSize = 16384;
162                 rx_ring->Fbr1NumEntries = 128;
163         }
164
165 #ifdef USE_FBR0
166         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167             adapter->RxRing.Fbr1NumEntries;
168 #else
169         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
170 #endif
171
172         /* Allocate an area of memory for Free Buffer Ring 1 */
173         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
174         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
175                                                     bufsize,
176                                                     &rx_ring->pFbr1RingPa);
177         if (!rx_ring->pFbr1RingVa) {
178                 dev_err(&adapter->pdev->dev,
179                           "Cannot alloc memory for Free Buffer Ring 1\n");
180                 return -ENOMEM;
181         }
182
183         /* Save physical address
184          *
185          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
186          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
187          * are ever returned, make sure the high part is retrieved here
188          * before storing the adjusted address.
189          */
190         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
191
192         /* Align Free Buffer Ring 1 on a 4K boundary */
193         et131x_align_allocated_memory(adapter,
194                                       &rx_ring->Fbr1Realpa,
195                                       &rx_ring->Fbr1offset, 0x0FFF);
196
197         rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198                                         rx_ring->Fbr1offset);
199
200 #ifdef USE_FBR0
201         /* Allocate an area of memory for Free Buffer Ring 0 */
202         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
203         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
204                                                     bufsize,
205                                                     &rx_ring->pFbr0RingPa);
206         if (!rx_ring->pFbr0RingVa) {
207                 dev_err(&adapter->pdev->dev,
208                           "Cannot alloc memory for Free Buffer Ring 0\n");
209                 return -ENOMEM;
210         }
211
212         /* Save physical address
213          *
214          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
215          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
216          * are ever returned, make sure the high part is retrieved here before
217          * storing the adjusted address.
218          */
219         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
220
221         /* Align Free Buffer Ring 0 on a 4K boundary */
222         et131x_align_allocated_memory(adapter,
223                                       &rx_ring->Fbr0Realpa,
224                                       &rx_ring->Fbr0offset, 0x0FFF);
225
226         rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227                                         rx_ring->Fbr0offset);
228 #endif
229
230         for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
231              i++) {
232                 u64 Fbr1Offset;
233                 u64 Fbr1TempPa;
234                 u32 Fbr1Align;
235
236                 /* This code allocates an area of memory big enough for N
237                  * free buffers + (buffer_size - 1) so that the buffers can
238                  * be aligned on 4k boundaries.  If each buffer were aligned
239                  * to a buffer_size boundary, the effect would be to double
240                  * the size of FBR0.  By allocating N buffers at once, we
241                  * reduce this overhead.
242                  */
243                 if (rx_ring->Fbr1BufferSize > 4096)
244                         Fbr1Align = 4096;
245                 else
246                         Fbr1Align = rx_ring->Fbr1BufferSize;
247
248                 FBRChunkSize =
249                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250                 rx_ring->Fbr1MemVa[i] =
251                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252                                          &rx_ring->Fbr1MemPa[i]);
253
254                 if (!rx_ring->Fbr1MemVa[i]) {
255                 dev_err(&adapter->pdev->dev,
256                                 "Could not alloc memory\n");
257                         return -ENOMEM;
258                 }
259
260                 /* See NOTE in "Save Physical Address" comment above */
261                 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
262
263                 et131x_align_allocated_memory(adapter,
264                                               &Fbr1TempPa,
265                                               &Fbr1Offset, (Fbr1Align - 1));
266
267                 for (j = 0; j < FBR_CHUNKS; j++) {
268                         u32 index = (i * FBR_CHUNKS) + j;
269
270                         /* Save the Virtual address of this index for quick
271                          * access later
272                          */
273                         rx_ring->Fbr[1]->Va[index] =
274                             (uint8_t *) rx_ring->Fbr1MemVa[i] +
275                             (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
276
277                         /* now store the physical address in the descriptor
278                          * so the device can access it
279                          */
280                         rx_ring->Fbr[1]->PAHigh[index] =
281                             (u32) (Fbr1TempPa >> 32);
282                         rx_ring->Fbr[1]->PALow[index] = (u32) Fbr1TempPa;
283
284                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
285
286                         rx_ring->Fbr[1]->Buffer1[index] =
287                             rx_ring->Fbr[1]->Va[index];
288                         rx_ring->Fbr[1]->Buffer2[index] =
289                             rx_ring->Fbr[1]->Va[index] - 4;
290                 }
291         }
292
293 #ifdef USE_FBR0
294         /* Same for FBR0 (if in use) */
295         for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
296              i++) {
297                 u64 Fbr0Offset;
298                 u64 Fbr0TempPa;
299
300                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301                 rx_ring->Fbr0MemVa[i] =
302                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303                                          &rx_ring->Fbr0MemPa[i]);
304
305                 if (!rx_ring->Fbr0MemVa[i]) {
306                         dev_err(&adapter->pdev->dev,
307                                 "Could not alloc memory\n");
308                         return -ENOMEM;
309                 }
310
311                 /* See NOTE in "Save Physical Address" comment above */
312                 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
313
314                 et131x_align_allocated_memory(adapter,
315                                               &Fbr0TempPa,
316                                               &Fbr0Offset,
317                                               rx_ring->Fbr0BufferSize - 1);
318
319                 for (j = 0; j < FBR_CHUNKS; j++) {
320                         u32 index = (i * FBR_CHUNKS) + j;
321
322                         rx_ring->Fbr[0]->Va[index] =
323                             (uint8_t *) rx_ring->Fbr0MemVa[i] +
324                             (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
325
326                         rx_ring->Fbr[0]->PAHigh[index] =
327                             (u32) (Fbr0TempPa >> 32);
328                         rx_ring->Fbr[0]->PALow[index] = (u32) Fbr0TempPa;
329
330                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
331
332                         rx_ring->Fbr[0]->Buffer1[index] =
333                             rx_ring->Fbr[0]->Va[index];
334                         rx_ring->Fbr[0]->Buffer2[index] =
335                             rx_ring->Fbr[0]->Va[index] - 4;
336                 }
337         }
338 #endif
339
340         /* Allocate an area of memory for FIFO of Packet Status ring entries */
341         pktStatRingSize =
342             sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
343
344         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
345                                                   pktStatRingSize,
346                                                   &rx_ring->pPSRingPa);
347
348         if (!rx_ring->pPSRingVa) {
349                 dev_err(&adapter->pdev->dev,
350                           "Cannot alloc memory for Packet Status Ring\n");
351                 return -ENOMEM;
352         }
353         printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
354
355         /*
356          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
357          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
358          * are ever returned, make sure the high part is retrieved here before
359          * storing the adjusted address.
360          */
361
362         /* Allocate an area of memory for writeback of status information */
363         rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
364                                                     sizeof(RX_STATUS_BLOCK_t),
365                                                     &rx_ring->pRxStatusPa);
366         if (!rx_ring->pRxStatusVa) {
367                 dev_err(&adapter->pdev->dev,
368                           "Cannot alloc memory for Status Block\n");
369                 return -ENOMEM;
370         }
371         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
372         printk("PRS %lx\n", (unsigned long)rx_ring->pRxStatusPa);
373
374         /* Recv
375          * pci_pool_create initializes a lookaside list. After successful
376          * creation, nonpaged fixed-size blocks can be allocated from and
377          * freed to the lookaside list.
378          * RFDs will be allocated from this pool.
379          */
380         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
381                                                    sizeof(MP_RFD),
382                                                    0,
383                                                    SLAB_CACHE_DMA |
384                                                    SLAB_HWCACHE_ALIGN,
385                                                    NULL);
386
387         adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
388
389         /* The RFDs are going to be put on lists later on, so initialize the
390          * lists now.
391          */
392         INIT_LIST_HEAD(&rx_ring->RecvList);
393         return 0;
394 }
395
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Tears down everything built by et131x_rx_dma_memory_alloc(), in the
 * reverse of the alloc order where it matters.  Safe to call on a
 * partially-initialized RX ring: every free is guarded by a NULL check
 * and pointers are cleared after release.
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	PMP_RFD rfd;
	RX_RING_t *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Free RFDs and associated packet descriptors */
	/* If any RFDs are still out (on RecvPendList or in flight), this is
	 * a leak/use-after-free hazard -- warn loudly.
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&rfd->list_node);
		rfd->Packet = NULL;
		kmem_cache_free(adapter->RxRing.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Must recompute the same alignment slack the
				 * alloc path added, so the freed size matches
				 * the allocated size exactly.
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		/* Undo the 4K-alignment offset applied at alloc time so we
		 * hand pci_free_consistent() the original VA.
		 */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
								+ 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		/* Undo the alignment offset, as for FBR1 above */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
								+ 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->pRxStatusVa) {
		pci_free_consistent(adapter->pdev,
				sizeof(RX_STATUS_BLOCK_t),
				rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
	/* kfree(NULL) is a no-op, so no guards are needed here */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
537
538 /**
539  * et131x_init_recv - Initialize receive data structures.
540  * @adapter: pointer to our private adapter structure
541  *
542  * Returns 0 on success and errno on failure (as defined in errno.h)
543  */
544 int et131x_init_recv(struct et131x_adapter *adapter)
545 {
546         int status = -ENOMEM;
547         PMP_RFD rfd = NULL;
548         u32 rfdct;
549         u32 numrfd = 0;
550         RX_RING_t *rx_ring = NULL;
551
552         /* Setup some convenience pointers */
553         rx_ring = (RX_RING_t *) &adapter->RxRing;
554
555         /* Setup each RFD */
556         for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
557                 rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
558                                                      GFP_ATOMIC | GFP_DMA);
559
560                 if (!rfd) {
561                         dev_err(&adapter->pdev->dev,
562                                   "Couldn't alloc RFD out of kmem_cache\n");
563                         status = -ENOMEM;
564                         continue;
565                 }
566
567                 rfd->Packet = NULL;
568
569                 /* Add this RFD to the RecvList */
570                 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
571
572                 /* Increment both the available RFD's, and the total RFD's. */
573                 rx_ring->nReadyRecv++;
574                 numrfd++;
575         }
576
577         if (numrfd > NIC_MIN_NUM_RFD)
578                 status = 0;
579
580         rx_ring->NumRfd = numrfd;
581
582         if (status != 0) {
583                 kmem_cache_free(rx_ring->RecvLookaside, rfd);
584                 dev_err(&adapter->pdev->dev,
585                           "Allocation problems in et131x_init_recv\n");
586         }
587         return status;
588 }
589
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 *
 * Halts the RX DMA engine, programs the status writeback block, the
 * Packet Status Ring, and the Free Buffer Ring(s) into the device
 * registers, then sets the interrupt-coalescing parameters.  The caller
 * must re-enable RX DMA (et131x_rx_dma_enable) afterwards.
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->pRxStatusPa >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->pRxStatusPa, &rx_dma->dma_wb_base_lo);

	memset(rx_local->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Read the entry count back (12-bit field) and derive the PSR
	 * low-water mark as a percentage of it.
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	/* Populate each descriptor with its buffer's DMA address; word2
	 * carries the buffer index so the hardware can report it back.
	 */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[1]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[1]->PALow[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[0]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[0]->PALow[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
700
/**
 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
 * @etdev: pointer to our adapter structure
 *
 * At 10/100 Mbit/s the coalescing timer and packet-count threshold are
 * effectively turned off (interrupt per packet); at gigabit rates the
 * values programmed by ConfigRxDmaRegs are left in place.
 */
void SetRxDmaTimer(struct et131x_adapter *etdev)
{
	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates. We do not enable an RxDMA interrupt coalescing.
	 */
	if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
	    (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
		writel(0, &etdev->regs->rxdma.max_pkt_time);
		writel(1, &etdev->regs->rxdma.num_pkt_done);
	}
}
716
717 /**
718  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
719  * @etdev: pointer to our adapter structure
720  */
721 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
722 {
723         u32 csr;
724         /* Setup the receive dma configuration register */
725         writel(0x00002001, &etdev->regs->rxdma.csr);
726         csr = readl(&etdev->regs->rxdma.csr);
727         if ((csr & 0x00020000) != 1) {  /* Check halt status (bit 17) */
728                 udelay(5);
729                 csr = readl(&etdev->regs->rxdma.csr);
730                 if ((csr & 0x00020000) != 1)
731                         dev_err(&etdev->pdev->dev,
732                         "RX Dma failed to enter halt state. CSR 0x%08x\n",
733                                 csr);
734         }
735 }
736
737 /**
738  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
739  * @etdev: pointer to our adapter structure
740  */
741 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
742 {
743         /* Setup the receive dma configuration register for normal operation */
744         u32 csr =  0x2000;      /* FBR1 enable */
745
746         if (etdev->RxRing.Fbr1BufferSize == 4096)
747                 csr |= 0x0800;
748         else if (etdev->RxRing.Fbr1BufferSize == 8192)
749                 csr |= 0x1000;
750         else if (etdev->RxRing.Fbr1BufferSize == 16384)
751                 csr |= 0x1800;
752 #ifdef USE_FBR0
753         csr |= 0x0400;          /* FBR0 enable */
754         if (etdev->RxRing.Fbr0BufferSize == 256)
755                 csr |= 0x0100;
756         else if (etdev->RxRing.Fbr0BufferSize == 512)
757                 csr |= 0x0200;
758         else if (etdev->RxRing.Fbr0BufferSize == 1024)
759                 csr |= 0x0300;
760 #endif
761         writel(csr, &etdev->regs->rxdma.csr);
762
763         csr = readl(&etdev->regs->rxdma.csr);
764         if ((csr & 0x00020000) != 0) {
765                 udelay(5);
766                 csr = readl(&etdev->regs->rxdma.csr);
767                 if ((csr & 0x00020000) != 0) {
768                         dev_err(&etdev->pdev->dev,
769                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
770                                 csr);
771                 }
772         }
773 }
774
775 /**
776  * nic_rx_pkts - Checks the hardware for available packets
777  * @etdev: pointer to our adapter
778  *
779  * Returns rfd, a pointer to our MPRFD.
780  *
781  * Checks the hardware for available packets, using completion ring
782  * If packets are available, it gets an RFD from the RecvList, attaches
783  * the packet to it, puts the RFD in the RecvPendList, and also returns
784  * the pointer to the RFD.
785  */
786 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
787 {
788         struct _rx_ring_t *rx_local = &etdev->RxRing;
789         PRX_STATUS_BLOCK_t status;
790         PPKT_STAT_DESC_t psr;
791         PMP_RFD rfd;
792         u32 i;
793         uint8_t *buf;
794         unsigned long flags;
795         struct list_head *element;
796         uint8_t rindex;
797         uint16_t bindex;
798         u32 len;
799         PKT_STAT_DESC_WORD0_t Word0;
800
801         /* RX Status block is written by the DMA engine prior to every
802          * interrupt. It contains the next to be used entry in the Packet
803          * Status Ring, and also the two Free Buffer rings.
804          */
805         status = (PRX_STATUS_BLOCK_t) rx_local->pRxStatusVa;
806
807         /* FIXME: tidy later when conversions complete */
808         if (status->Word1.bits.PSRoffset ==
809                         (rx_local->local_psr_full & 0xFFF) &&
810                         status->Word1.bits.PSRwrap ==
811                         ((rx_local->local_psr_full >> 12) & 1)) {
812                 /* Looks like this ring is not updated yet */
813                 return NULL;
814         }
815
816         /* The packet status ring indicates that data is available. */
817         psr = (PPKT_STAT_DESC_t) (rx_local->pPSRingVa) +
818                         (rx_local->local_psr_full & 0xFFF);
819
820         /* Grab any information that is required once the PSR is
821          * advanced, since we can no longer rely on the memory being
822          * accurate
823          */
824         len = psr->word1.bits.length;
825         rindex = (uint8_t) psr->word1.bits.ri;
826         bindex = (uint16_t) psr->word1.bits.bi;
827         Word0 = psr->word0;
828
829         /* Indicate that we have used this PSR entry. */
830         /* FIXME wrap 12 */
831         add_12bit(&rx_local->local_psr_full, 1);
832         if ((rx_local->local_psr_full & 0xFFF)  > rx_local->PsrNumEntries - 1) {
833                 /* Clear psr full and toggle the wrap bit */
834                 rx_local->local_psr_full &=  ~0xFFF;
835                 rx_local->local_psr_full ^= 0x1000;
836         }
837
838         writel(rx_local->local_psr_full,
839                &etdev->regs->rxdma.psr_full_offset);
840
841 #ifndef USE_FBR0
842         if (rindex != 1) {
843                 return NULL;
844         }
845 #endif
846
847 #ifdef USE_FBR0
848         if (rindex > 1 ||
849                 (rindex == 0 &&
850                 bindex > rx_local->Fbr0NumEntries - 1) ||
851                 (rindex == 1 &&
852                 bindex > rx_local->Fbr1NumEntries - 1))
853 #else
854         if (rindex != 1 ||
855                 bindex > rx_local->Fbr1NumEntries - 1)
856 #endif
857         {
858                 /* Illegal buffer or ring index cannot be used by S/W*/
859                 dev_err(&etdev->pdev->dev,
860                           "NICRxPkts PSR Entry %d indicates "
861                           "length of %d and/or bad bi(%d)\n",
862                           rx_local->local_psr_full & 0xFFF,
863                           len, bindex);
864                 return NULL;
865         }
866
867         /* Get and fill the RFD. */
868         spin_lock_irqsave(&etdev->RcvLock, flags);
869
870         rfd = NULL;
871         element = rx_local->RecvList.next;
872         rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
873
874         if (rfd == NULL) {
875                 spin_unlock_irqrestore(&etdev->RcvLock, flags);
876                 return NULL;
877         }
878
879         list_del(&rfd->list_node);
880         rx_local->nReadyRecv--;
881
882         spin_unlock_irqrestore(&etdev->RcvLock, flags);
883
884         rfd->bufferindex = bindex;
885         rfd->ringindex = rindex;
886
887         /* In V1 silicon, there is a bug which screws up filtering of
888          * runt packets.  Therefore runt packet filtering is disabled
889          * in the MAC and the packets are dropped here.  They are
890          * also counted here.
891          */
892         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
893                 etdev->Stats.other_errors++;
894                 len = 0;
895         }
896
897         if (len) {
898                 if (etdev->ReplicaPhyLoopbk == 1) {
899                         buf = rx_local->Fbr[rindex]->Va[bindex];
900
901                         if (memcmp(&buf[6], &etdev->CurrentAddress[0],
902                                    ETH_ALEN) == 0) {
903                                 if (memcmp(&buf[42], "Replica packet",
904                                            ETH_HLEN)) {
905                                         etdev->ReplicaPhyLoopbkPF = 1;
906                                 }
907                         }
908                 }
909
910                 /* Determine if this is a multicast packet coming in */
911                 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
912                     !(Word0.value & ALCATEL_BROADCAST_PKT)) {
913                         /* Promiscuous mode and Multicast mode are
914                          * not mutually exclusive as was first
915                          * thought.  I guess Promiscuous is just
916                          * considered a super-set of the other
917                          * filters. Generally filter is 0x2b when in
918                          * promiscuous mode.
919                          */
920                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
921                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
922                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
923                                 buf = rx_local->Fbr[rindex]->
924                                                 Va[bindex];
925
926                                 /* Loop through our list to see if the
927                                  * destination address of this packet
928                                  * matches one in our list.
929                                  */
930                                 for (i = 0;
931                                      i < etdev->MCAddressCount;
932                                      i++) {
933                                         if (buf[0] ==
934                                             etdev->MCList[i][0]
935                                             && buf[1] ==
936                                             etdev->MCList[i][1]
937                                             && buf[2] ==
938                                             etdev->MCList[i][2]
939                                             && buf[3] ==
940                                             etdev->MCList[i][3]
941                                             && buf[4] ==
942                                             etdev->MCList[i][4]
943                                             && buf[5] ==
944                                             etdev->MCList[i][5]) {
945                                                 break;
946                                         }
947                                 }
948
949                                 /* If our index is equal to the number
950                                  * of Multicast address we have, then
951                                  * this means we did not find this
952                                  * packet's matching address in our
953                                  * list.  Set the PacketSize to zero,
954                                  * so we free our RFD when we return
955                                  * from this function.
956                                  */
957                                 if (i == etdev->MCAddressCount)
958                                         len = 0;
959                         }
960
961                         if (len > 0)
962                                 etdev->Stats.multircv++;
963                 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
964                         etdev->Stats.brdcstrcv++;
965                 else
966                         /* Not sure what this counter measures in
967                          * promiscuous mode. Perhaps we should check
968                          * the MAC address to see if it is directed
969                          * to us in promiscuous mode.
970                          */
971                         etdev->Stats.unircv++;
972         }
973
974         if (len > 0) {
975                 struct sk_buff *skb = NULL;
976
977                 /* rfd->PacketSize = len - 4; */
978                 rfd->PacketSize = len;
979
980                 skb = dev_alloc_skb(rfd->PacketSize + 2);
981                 if (!skb) {
982                         dev_err(&etdev->pdev->dev,
983                                   "Couldn't alloc an SKB for Rx\n");
984                         return NULL;
985                 }
986
987                 etdev->net_stats.rx_bytes += rfd->PacketSize;
988
989                 memcpy(skb_put(skb, rfd->PacketSize),
990                        rx_local->Fbr[rindex]->Va[bindex],
991                        rfd->PacketSize);
992
993                 skb->dev = etdev->netdev;
994                 skb->protocol = eth_type_trans(skb, etdev->netdev);
995                 skb->ip_summed = CHECKSUM_NONE;
996
997                 netif_rx(skb);
998         } else {
999                 rfd->PacketSize = 0;
1000         }
1001
1002         nic_return_rfd(etdev, rfd);
1003         return rfd;
1004 }
1005
1006 /**
1007  * et131x_reset_recv - Reset the receive list
1008  * @etdev: pointer to our adapter
1009  *
1010  * Assumption, Rcv spinlock has been acquired.
1011  */
1012 void et131x_reset_recv(struct et131x_adapter *etdev)
1013 {
1014         WARN_ON(list_empty(&etdev->RxRing.RecvList));
1015
1016 }
1017
1018 /**
1019  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1020  * @etdev: pointer to our adapter
1021  *
1022  * Assumption, Rcv spinlock has been acquired.
1023  */
1024 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1025 {
1026         PMP_RFD rfd = NULL;
1027         u32 count = 0;
1028         bool done = true;
1029
1030         /* Process up to available RFD's */
1031         while (count < NUM_PACKETS_HANDLED) {
1032                 if (list_empty(&etdev->RxRing.RecvList)) {
1033                         WARN_ON(etdev->RxRing.nReadyRecv != 0);
1034                         done = false;
1035                         break;
1036                 }
1037
1038                 rfd = nic_rx_pkts(etdev);
1039
1040                 if (rfd == NULL)
1041                         break;
1042
1043                 /* Do not receive any packets until a filter has been set.
1044                  * Do not receive any packets until we have link.
1045                  * If length is zero, return the RFD in order to advance the
1046                  * Free buffer ring.
1047                  */
1048                 if (!etdev->PacketFilter ||
1049                     !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1050                     rfd->PacketSize == 0) {
1051                         continue;
1052                 }
1053
1054                 /* Increment the number of packets we received */
1055                 etdev->Stats.ipackets++;
1056
1057                 /* Set the status on the packet, either resources or success */
1058                 if (etdev->RxRing.nReadyRecv < RFD_LOW_WATER_MARK) {
1059                         dev_warn(&etdev->pdev->dev,
1060                                     "RFD's are running out\n");
1061                 }
1062                 count++;
1063         }
1064
1065         if (count == NUM_PACKETS_HANDLED || !done) {
1066                 etdev->RxRing.UnfinishedReceives = true;
1067                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1068                        &etdev->regs->global.watchdog_timer);
1069         } else
1070                 /* Watchdog timer will disable itself if appropriate. */
1071                 etdev->RxRing.UnfinishedReceives = false;
1072 }
1073
1074 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1075 {
1076         u32 v = *fbr;
1077         v++;
1078         /* This works for all cases where limit < 1024. The 1023 case
1079            works because 1023++ is 1024 which means the if condition is not
1080            taken but the carry of the bit into the wrap bit toggles the wrap
1081            value correctly */
1082         if ((v & ET_DMA10_MASK) > limit) {
1083                 v &= ~ET_DMA10_MASK;
1084                 v ^= ET_DMA10_WRAP;
1085         }
1086         /* For the 1023 case */
1087         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1088         *fbr = v;
1089         return v;
1090 }
1091
/**
 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
{
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	uint16_t bi = rfd->bufferindex;	/* free buffer index being recycled */
	uint8_t ri = rfd->ringindex;	/* which free buffer ring (0 or 1) */
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		/* FbrLock serializes free buffer ring advancement */
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			/* Oldest (next to be freed) FBR1 descriptor slot */
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->pFbr1RingVa) +
					    INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[1]->PALow[bi];
			next->word2 = bi;

			/* Advance the local 10-bit index (+wrap) and tell
			 * the hardware about the newly available buffer */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
				rx_local->Fbr1NumEntries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			/* Same recycling path for ring 0 */
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->pFbr0RingVa +
					INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[0]->PALow[bi];
			next->word2 = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	/* Sanity check: never more ready RFD's than were allocated */
	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}