/* Staging: et131x: fbr_desc is now only sane types
 * [safe/jmp/linux-2.6] / drivers / staging / et131x / et1310_rx.c
 */
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
89
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
92
93 #include "et1310_rx.h"
94
95
96 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
97
98 /**
99  * et131x_rx_dma_memory_alloc
100  * @adapter: pointer to our private adapter structure
101  *
102  * Returns 0 on success and errno on failure (as defined in errno.h)
103  *
104  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
105  * and the Packet Status Ring.
106  */
107 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
108 {
109         u32 i, j;
110         u32 bufsize;
111         u32 pktStatRingSize, FBRChunkSize;
112         RX_RING_t *rx_ring;
113
114         /* Setup some convenience pointers */
115         rx_ring = (RX_RING_t *) &adapter->RxRing;
116
117         /* Alloc memory for the lookup table */
118 #ifdef USE_FBR0
119         rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
120 #endif
121
122         rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
123
124         /* The first thing we will do is configure the sizes of the buffer
125          * rings. These will change based on jumbo packet support.  Larger
126          * jumbo packets increases the size of each entry in FBR0, and the
127          * number of entries in FBR0, while at the same time decreasing the
128          * number of entries in FBR1.
129          *
130          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
131          * entries are huge in order to accomodate a "jumbo" frame, then it
132          * will have less entries.  Conversely, FBR1 will now be relied upon
133          * to carry more "normal" frames, thus it's entry size also increases
134          * and the number of entries goes up too (since it now carries
135          * "small" + "regular" packets.
136          *
137          * In this scheme, we try to maintain 512 entries between the two
138          * rings. Also, FBR1 remains a constant size - when it's size doubles
139          * the number of entries halves.  FBR0 increases in size, however.
140          */
141
142         if (adapter->RegistryJumboPacket < 2048) {
143 #ifdef USE_FBR0
144                 rx_ring->Fbr0BufferSize = 256;
145                 rx_ring->Fbr0NumEntries = 512;
146 #endif
147                 rx_ring->Fbr1BufferSize = 2048;
148                 rx_ring->Fbr1NumEntries = 512;
149         } else if (adapter->RegistryJumboPacket < 4096) {
150 #ifdef USE_FBR0
151                 rx_ring->Fbr0BufferSize = 512;
152                 rx_ring->Fbr0NumEntries = 1024;
153 #endif
154                 rx_ring->Fbr1BufferSize = 4096;
155                 rx_ring->Fbr1NumEntries = 512;
156         } else {
157 #ifdef USE_FBR0
158                 rx_ring->Fbr0BufferSize = 1024;
159                 rx_ring->Fbr0NumEntries = 768;
160 #endif
161                 rx_ring->Fbr1BufferSize = 16384;
162                 rx_ring->Fbr1NumEntries = 128;
163         }
164
165 #ifdef USE_FBR0
166         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167             adapter->RxRing.Fbr1NumEntries;
168 #else
169         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
170 #endif
171
172         /* Allocate an area of memory for Free Buffer Ring 1 */
173         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
174         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
175                                                     bufsize,
176                                                     &rx_ring->pFbr1RingPa);
177         if (!rx_ring->pFbr1RingVa) {
178                 dev_err(&adapter->pdev->dev,
179                           "Cannot alloc memory for Free Buffer Ring 1\n");
180                 return -ENOMEM;
181         }
182
183         /* Save physical address
184          *
185          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
186          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
187          * are ever returned, make sure the high part is retrieved here
188          * before storing the adjusted address.
189          */
190         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
191
192         /* Align Free Buffer Ring 1 on a 4K boundary */
193         et131x_align_allocated_memory(adapter,
194                                       &rx_ring->Fbr1Realpa,
195                                       &rx_ring->Fbr1offset, 0x0FFF);
196
197         rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198                                         rx_ring->Fbr1offset);
199
200 #ifdef USE_FBR0
201         /* Allocate an area of memory for Free Buffer Ring 0 */
202         bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
203         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
204                                                     bufsize,
205                                                     &rx_ring->pFbr0RingPa);
206         if (!rx_ring->pFbr0RingVa) {
207                 dev_err(&adapter->pdev->dev,
208                           "Cannot alloc memory for Free Buffer Ring 0\n");
209                 return -ENOMEM;
210         }
211
212         /* Save physical address
213          *
214          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
215          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
216          * are ever returned, make sure the high part is retrieved here before
217          * storing the adjusted address.
218          */
219         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
220
221         /* Align Free Buffer Ring 0 on a 4K boundary */
222         et131x_align_allocated_memory(adapter,
223                                       &rx_ring->Fbr0Realpa,
224                                       &rx_ring->Fbr0offset, 0x0FFF);
225
226         rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227                                         rx_ring->Fbr0offset);
228 #endif
229
230         for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
231              i++) {
232                 u64 Fbr1Offset;
233                 u64 Fbr1TempPa;
234                 u32 Fbr1Align;
235
236                 /* This code allocates an area of memory big enough for N
237                  * free buffers + (buffer_size - 1) so that the buffers can
238                  * be aligned on 4k boundaries.  If each buffer were aligned
239                  * to a buffer_size boundary, the effect would be to double
240                  * the size of FBR0.  By allocating N buffers at once, we
241                  * reduce this overhead.
242                  */
243                 if (rx_ring->Fbr1BufferSize > 4096)
244                         Fbr1Align = 4096;
245                 else
246                         Fbr1Align = rx_ring->Fbr1BufferSize;
247
248                 FBRChunkSize =
249                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250                 rx_ring->Fbr1MemVa[i] =
251                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252                                          &rx_ring->Fbr1MemPa[i]);
253
254                 if (!rx_ring->Fbr1MemVa[i]) {
255                 dev_err(&adapter->pdev->dev,
256                                 "Could not alloc memory\n");
257                         return -ENOMEM;
258                 }
259
260                 /* See NOTE in "Save Physical Address" comment above */
261                 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
262
263                 et131x_align_allocated_memory(adapter,
264                                               &Fbr1TempPa,
265                                               &Fbr1Offset, (Fbr1Align - 1));
266
267                 for (j = 0; j < FBR_CHUNKS; j++) {
268                         u32 index = (i * FBR_CHUNKS) + j;
269
270                         /* Save the Virtual address of this index for quick
271                          * access later
272                          */
273                         rx_ring->Fbr[1]->Va[index] =
274                             (uint8_t *) rx_ring->Fbr1MemVa[i] +
275                             (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
276
277                         /* now store the physical address in the descriptor
278                          * so the device can access it
279                          */
280                         rx_ring->Fbr[1]->PAHigh[index] =
281                             (u32) (Fbr1TempPa >> 32);
282                         rx_ring->Fbr[1]->PALow[index] = (u32) Fbr1TempPa;
283
284                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
285
286                         rx_ring->Fbr[1]->Buffer1[index] =
287                             rx_ring->Fbr[1]->Va[index];
288                         rx_ring->Fbr[1]->Buffer2[index] =
289                             rx_ring->Fbr[1]->Va[index] - 4;
290                 }
291         }
292
293 #ifdef USE_FBR0
294         /* Same for FBR0 (if in use) */
295         for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
296              i++) {
297                 u64 Fbr0Offset;
298                 u64 Fbr0TempPa;
299
300                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301                 rx_ring->Fbr0MemVa[i] =
302                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303                                          &rx_ring->Fbr0MemPa[i]);
304
305                 if (!rx_ring->Fbr0MemVa[i]) {
306                         dev_err(&adapter->pdev->dev,
307                                 "Could not alloc memory\n");
308                         return -ENOMEM;
309                 }
310
311                 /* See NOTE in "Save Physical Address" comment above */
312                 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
313
314                 et131x_align_allocated_memory(adapter,
315                                               &Fbr0TempPa,
316                                               &Fbr0Offset,
317                                               rx_ring->Fbr0BufferSize - 1);
318
319                 for (j = 0; j < FBR_CHUNKS; j++) {
320                         u32 index = (i * FBR_CHUNKS) + j;
321
322                         rx_ring->Fbr[0]->Va[index] =
323                             (uint8_t *) rx_ring->Fbr0MemVa[i] +
324                             (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
325
326                         rx_ring->Fbr[0]->PAHigh[index] =
327                             (u32) (Fbr0TempPa >> 32);
328                         rx_ring->Fbr[0]->PALow[index] = (u32) Fbr0TempPa;
329
330                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
331
332                         rx_ring->Fbr[0]->Buffer1[index] =
333                             rx_ring->Fbr[0]->Va[index];
334                         rx_ring->Fbr[0]->Buffer2[index] =
335                             rx_ring->Fbr[0]->Va[index] - 4;
336                 }
337         }
338 #endif
339
340         /* Allocate an area of memory for FIFO of Packet Status ring entries */
341         pktStatRingSize =
342             sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
343
344         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
345                                                   pktStatRingSize,
346                                                   &rx_ring->pPSRingPa);
347
348         if (!rx_ring->pPSRingVa) {
349                 dev_err(&adapter->pdev->dev,
350                           "Cannot alloc memory for Packet Status Ring\n");
351                 return -ENOMEM;
352         }
353         printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
354
355         /*
356          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
357          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
358          * are ever returned, make sure the high part is retrieved here before
359          * storing the adjusted address.
360          */
361
362         /* Allocate an area of memory for writeback of status information */
363         rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
364                                                     sizeof(RX_STATUS_BLOCK_t),
365                                                     &rx_ring->pRxStatusPa);
366         if (!rx_ring->pRxStatusVa) {
367                 dev_err(&adapter->pdev->dev,
368                           "Cannot alloc memory for Status Block\n");
369                 return -ENOMEM;
370         }
371         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
372         printk("PRS %lx\n", (unsigned long)rx_ring->pRxStatusPa);
373
374         /* Recv
375          * pci_pool_create initializes a lookaside list. After successful
376          * creation, nonpaged fixed-size blocks can be allocated from and
377          * freed to the lookaside list.
378          * RFDs will be allocated from this pool.
379          */
380         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
381                                                    sizeof(MP_RFD),
382                                                    0,
383                                                    SLAB_CACHE_DMA |
384                                                    SLAB_HWCACHE_ALIGN,
385                                                    NULL);
386
387         adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
388
389         /* The RFDs are going to be put on lists later on, so initialize the
390          * lists now.
391          */
392         INIT_LIST_HEAD(&rx_ring->RecvList);
393         return 0;
394 }
395
396 /**
397  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
398  * @adapter: pointer to our private adapter structure
399  */
400 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
401 {
402         u32 index;
403         u32 bufsize;
404         u32 pktStatRingSize;
405         PMP_RFD rfd;
406         RX_RING_t *rx_ring;
407
408         /* Setup some convenience pointers */
409         rx_ring = (RX_RING_t *) &adapter->RxRing;
410
411         /* Free RFDs and associated packet descriptors */
412         WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
413
414         while (!list_empty(&rx_ring->RecvList)) {
415                 rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
416                                                MP_RFD, list_node);
417
418                 list_del(&rfd->list_node);
419                 rfd->Packet = NULL;
420                 kmem_cache_free(adapter->RxRing.RecvLookaside, rfd);
421         }
422
423         /* Free Free Buffer Ring 1 */
424         if (rx_ring->pFbr1RingVa) {
425                 /* First the packet memory */
426                 for (index = 0; index <
427                      (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
428                         if (rx_ring->Fbr1MemVa[index]) {
429                                 u32 Fbr1Align;
430
431                                 if (rx_ring->Fbr1BufferSize > 4096)
432                                         Fbr1Align = 4096;
433                                 else
434                                         Fbr1Align = rx_ring->Fbr1BufferSize;
435
436                                 bufsize =
437                                     (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
438                                     Fbr1Align - 1;
439
440                                 pci_free_consistent(adapter->pdev,
441                                                     bufsize,
442                                                     rx_ring->Fbr1MemVa[index],
443                                                     rx_ring->Fbr1MemPa[index]);
444
445                                 rx_ring->Fbr1MemVa[index] = NULL;
446                         }
447                 }
448
449                 /* Now the FIFO itself */
450                 rx_ring->pFbr1RingVa = (void *)((uint8_t *)
451                                 rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
452
453                 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
454                                                                 + 0xfff;
455
456                 pci_free_consistent(adapter->pdev, bufsize,
457                                 rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
458
459                 rx_ring->pFbr1RingVa = NULL;
460         }
461
462 #ifdef USE_FBR0
463         /* Now the same for Free Buffer Ring 0 */
464         if (rx_ring->pFbr0RingVa) {
465                 /* First the packet memory */
466                 for (index = 0; index <
467                      (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
468                         if (rx_ring->Fbr0MemVa[index]) {
469                                 bufsize =
470                                     (rx_ring->Fbr0BufferSize *
471                                      (FBR_CHUNKS + 1)) - 1;
472
473                                 pci_free_consistent(adapter->pdev,
474                                                     bufsize,
475                                                     rx_ring->Fbr0MemVa[index],
476                                                     rx_ring->Fbr0MemPa[index]);
477
478                                 rx_ring->Fbr0MemVa[index] = NULL;
479                         }
480                 }
481
482                 /* Now the FIFO itself */
483                 rx_ring->pFbr0RingVa = (void *)((uint8_t *)
484                                 rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
485
486                 bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
487                                                                 + 0xfff;
488
489                 pci_free_consistent(adapter->pdev,
490                                     bufsize,
491                                     rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);
492
493                 rx_ring->pFbr0RingVa = NULL;
494         }
495 #endif
496
497         /* Free Packet Status Ring */
498         if (rx_ring->pPSRingVa) {
499                 pktStatRingSize =
500                     sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
501
502                 pci_free_consistent(adapter->pdev, pktStatRingSize,
503                                     rx_ring->pPSRingVa, rx_ring->pPSRingPa);
504
505                 rx_ring->pPSRingVa = NULL;
506         }
507
508         /* Free area of memory for the writeback of status information */
509         if (rx_ring->pRxStatusVa) {
510                 pci_free_consistent(adapter->pdev,
511                                 sizeof(RX_STATUS_BLOCK_t),
512                                 rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);
513
514                 rx_ring->pRxStatusVa = NULL;
515         }
516
517         /* Free receive buffer pool */
518
519         /* Free receive packet pool */
520
521         /* Destroy the lookaside (RFD) pool */
522         if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
523                 kmem_cache_destroy(rx_ring->RecvLookaside);
524                 adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
525         }
526
527         /* Free the FBR Lookup Table */
528 #ifdef USE_FBR0
529         kfree(rx_ring->Fbr[0]);
530 #endif
531
532         kfree(rx_ring->Fbr[1]);
533
534         /* Reset Counters */
535         rx_ring->nReadyRecv = 0;
536 }
537
538 /**
539  * et131x_init_recv - Initialize receive data structures.
540  * @adapter: pointer to our private adapter structure
541  *
542  * Returns 0 on success and errno on failure (as defined in errno.h)
543  */
544 int et131x_init_recv(struct et131x_adapter *adapter)
545 {
546         int status = -ENOMEM;
547         PMP_RFD rfd = NULL;
548         u32 rfdct;
549         u32 numrfd = 0;
550         RX_RING_t *rx_ring = NULL;
551
552         /* Setup some convenience pointers */
553         rx_ring = (RX_RING_t *) &adapter->RxRing;
554
555         /* Setup each RFD */
556         for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
557                 rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
558                                                      GFP_ATOMIC | GFP_DMA);
559
560                 if (!rfd) {
561                         dev_err(&adapter->pdev->dev,
562                                   "Couldn't alloc RFD out of kmem_cache\n");
563                         status = -ENOMEM;
564                         continue;
565                 }
566
567                 rfd->Packet = NULL;
568
569                 /* Add this RFD to the RecvList */
570                 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
571
572                 /* Increment both the available RFD's, and the total RFD's. */
573                 rx_ring->nReadyRecv++;
574                 numrfd++;
575         }
576
577         if (numrfd > NIC_MIN_NUM_RFD)
578                 status = 0;
579
580         rx_ring->NumRfd = numrfd;
581
582         if (status != 0) {
583                 kmem_cache_free(rx_ring->RecvLookaside, rfd);
584                 dev_err(&adapter->pdev->dev,
585                           "Allocation problems in et131x_init_recv\n");
586         }
587         return status;
588 }
589
590 /**
591  * ConfigRxDmaRegs - Start of Rx_DMA init sequence
592  * @etdev: pointer to our adapter structure
593  */
594 void ConfigRxDmaRegs(struct et131x_adapter *etdev)
595 {
596         struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
597         struct _rx_ring_t *rx_local = &etdev->RxRing;
598         struct fbr_desc *fbr_entry;
599         u32 entry;
600         u32 psr_num_des;
601         unsigned long flags;
602
603         /* Halt RXDMA to perform the reconfigure.  */
604         et131x_rx_dma_disable(etdev);
605
606         /* Load the completion writeback physical address
607          *
608          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
609          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
610          * are ever returned, make sure the high part is retrieved here
611          * before storing the adjusted address.
612          */
613         writel((u32) ((u64)rx_local->pRxStatusPa >> 32),
614                &rx_dma->dma_wb_base_hi);
615         writel((u32) rx_local->pRxStatusPa, &rx_dma->dma_wb_base_lo);
616
617         memset(rx_local->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));
618
619         /* Set the address and parameters of the packet status ring into the
620          * 1310's registers
621          */
622         writel((u32) ((u64)rx_local->pPSRingPa >> 32),
623                &rx_dma->psr_base_hi);
624         writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
625         writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
626         writel(0, &rx_dma->psr_full_offset);
627
628         psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
629         writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
630                &rx_dma->psr_min_des);
631
632         spin_lock_irqsave(&etdev->RcvLock, flags);
633
634         /* These local variables track the PSR in the adapter structure */
635         rx_local->local_psr_full = 0;
636
637         /* Now's the best time to initialize FBR1 contents */
638         fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
639         for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
640                 fbr_entry->addr_hi = rx_local->Fbr[1]->PAHigh[entry];
641                 fbr_entry->addr_lo = rx_local->Fbr[1]->PALow[entry];
642                 fbr_entry->word2 = entry;
643                 fbr_entry++;
644         }
645
646         /* Set the address and parameters of Free buffer ring 1 (and 0 if
647          * required) into the 1310's registers
648          */
649         writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
650         writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
651         writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
652         writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
653
654         /* This variable tracks the free buffer ring 1 full position, so it
655          * has to match the above.
656          */
657         rx_local->local_Fbr1_full = ET_DMA10_WRAP;
658         writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
659                &rx_dma->fbr1_min_des);
660
661 #ifdef USE_FBR0
662         /* Now's the best time to initialize FBR0 contents */
663         fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
664         for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
665                 fbr_entry->addr_hi = rx_local->Fbr[0]->PAHigh[entry];
666                 fbr_entry->addr_lo = rx_local->Fbr[0]->PALow[entry];
667                 fbr_entry->word2 = entry;
668                 fbr_entry++;
669         }
670
671         writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
672         writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
673         writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
674         writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
675
676         /* This variable tracks the free buffer ring 0 full position, so it
677          * has to match the above.
678          */
679         rx_local->local_Fbr0_full = ET_DMA10_WRAP;
680         writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
681                &rx_dma->fbr0_min_des);
682 #endif
683
684         /* Program the number of packets we will receive before generating an
685          * interrupt.
686          * For version B silicon, this value gets updated once autoneg is
687          *complete.
688          */
689         writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
690
691         /* The "time_done" is not working correctly to coalesce interrupts
692          * after a given time period, but rather is giving us an interrupt
693          * regardless of whether we have received packets.
694          * This value gets updated once autoneg is complete.
695          */
696         writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
697
698         spin_unlock_irqrestore(&etdev->RcvLock, flags);
699 }
700
701 /**
702  * SetRxDmaTimer - Set the heartbeat timer according to line rate.
703  * @etdev: pointer to our adapter structure
704  */
705 void SetRxDmaTimer(struct et131x_adapter *etdev)
706 {
707         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
708          * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
709          */
710         if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
711             (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
712                 writel(0, &etdev->regs->rxdma.max_pkt_time);
713                 writel(1, &etdev->regs->rxdma.num_pkt_done);
714         }
715 }
716
717 /**
718  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
719  * @etdev: pointer to our adapter structure
720  */
721 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
722 {
723         RXDMA_CSR_t csr;
724
725         /* Setup the receive dma configuration register */
726         writel(0x00002001, &etdev->regs->rxdma.csr.value);
727         csr.value = readl(&etdev->regs->rxdma.csr.value);
728         if (csr.bits.halt_status != 1) {
729                 udelay(5);
730                 csr.value = readl(&etdev->regs->rxdma.csr.value);
731                 if (csr.bits.halt_status != 1)
732                         dev_err(&etdev->pdev->dev,
733                                 "RX Dma failed to enter halt state. CSR 0x%08x\n",
734                                 csr.value);
735         }
736 }
737
738 /**
739  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
740  * @etdev: pointer to our adapter structure
741  */
742 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
743 {
744         /* Setup the receive dma configuration register for normal operation */
745         RXDMA_CSR_t csr = { 0 };
746
747         csr.bits.fbr1_enable = 1;
748         if (etdev->RxRing.Fbr1BufferSize == 4096)
749                 csr.bits.fbr1_size = 1;
750         else if (etdev->RxRing.Fbr1BufferSize == 8192)
751                 csr.bits.fbr1_size = 2;
752         else if (etdev->RxRing.Fbr1BufferSize == 16384)
753                 csr.bits.fbr1_size = 3;
754 #ifdef USE_FBR0
755         csr.bits.fbr0_enable = 1;
756         if (etdev->RxRing.Fbr0BufferSize == 256)
757                 csr.bits.fbr0_size = 1;
758         else if (etdev->RxRing.Fbr0BufferSize == 512)
759                 csr.bits.fbr0_size = 2;
760         else if (etdev->RxRing.Fbr0BufferSize == 1024)
761                 csr.bits.fbr0_size = 3;
762 #endif
763         writel(csr.value, &etdev->regs->rxdma.csr.value);
764
765         csr.value = readl(&etdev->regs->rxdma.csr.value);
766         if (csr.bits.halt_status != 0) {
767                 udelay(5);
768                 csr.value = readl(&etdev->regs->rxdma.csr.value);
769                 if (csr.bits.halt_status != 0) {
770                         dev_err(&etdev->pdev->dev,
771                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
772                                 csr.value);
773                 }
774         }
775 }
776
/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @etdev: pointer to our adapter
 *
 * Returns rfd, a pointer to our MPRFD, or NULL when no packet is ready.
 *
 * Checks the hardware for available packets, using the completion ring.
 * If a packet is available, it takes an RFD off the RecvList, attaches
 * the packet data to it, recycles it via nic_return_rfd(), and returns
 * the pointer to the RFD.
 */
788 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
789 {
790         struct _rx_ring_t *rx_local = &etdev->RxRing;
791         PRX_STATUS_BLOCK_t status;
792         PPKT_STAT_DESC_t psr;
793         PMP_RFD rfd;
794         u32 i;
795         uint8_t *buf;
796         unsigned long flags;
797         struct list_head *element;
798         uint8_t rindex;
799         uint16_t bindex;
800         u32 len;
801         PKT_STAT_DESC_WORD0_t Word0;
802
803         /* RX Status block is written by the DMA engine prior to every
804          * interrupt. It contains the next to be used entry in the Packet
805          * Status Ring, and also the two Free Buffer rings.
806          */
807         status = (PRX_STATUS_BLOCK_t) rx_local->pRxStatusVa;
808
809         /* FIXME: tidy later when conversions complete */
810         if (status->Word1.bits.PSRoffset ==
811                         (rx_local->local_psr_full & 0xFFF) &&
812                         status->Word1.bits.PSRwrap ==
813                         ((rx_local->local_psr_full >> 12) & 1)) {
814                 /* Looks like this ring is not updated yet */
815                 return NULL;
816         }
817
818         /* The packet status ring indicates that data is available. */
819         psr = (PPKT_STAT_DESC_t) (rx_local->pPSRingVa) +
820                         (rx_local->local_psr_full & 0xFFF);
821
822         /* Grab any information that is required once the PSR is
823          * advanced, since we can no longer rely on the memory being
824          * accurate
825          */
826         len = psr->word1.bits.length;
827         rindex = (uint8_t) psr->word1.bits.ri;
828         bindex = (uint16_t) psr->word1.bits.bi;
829         Word0 = psr->word0;
830
831         /* Indicate that we have used this PSR entry. */
832         /* FIXME wrap 12 */
833         add_12bit(&rx_local->local_psr_full, 1);
834         if ((rx_local->local_psr_full & 0xFFF)  > rx_local->PsrNumEntries - 1) {
835                 /* Clear psr full and toggle the wrap bit */
836                 rx_local->local_psr_full &=  ~0xFFF;
837                 rx_local->local_psr_full ^= 0x1000;
838         }
839
840         writel(rx_local->local_psr_full,
841                &etdev->regs->rxdma.psr_full_offset);
842
843 #ifndef USE_FBR0
844         if (rindex != 1) {
845                 return NULL;
846         }
847 #endif
848
849 #ifdef USE_FBR0
850         if (rindex > 1 ||
851                 (rindex == 0 &&
852                 bindex > rx_local->Fbr0NumEntries - 1) ||
853                 (rindex == 1 &&
854                 bindex > rx_local->Fbr1NumEntries - 1))
855 #else
856         if (rindex != 1 ||
857                 bindex > rx_local->Fbr1NumEntries - 1)
858 #endif
859         {
860                 /* Illegal buffer or ring index cannot be used by S/W*/
861                 dev_err(&etdev->pdev->dev,
862                           "NICRxPkts PSR Entry %d indicates "
863                           "length of %d and/or bad bi(%d)\n",
864                           rx_local->local_psr_full & 0xFFF,
865                           len, bindex);
866                 return NULL;
867         }
868
869         /* Get and fill the RFD. */
870         spin_lock_irqsave(&etdev->RcvLock, flags);
871
872         rfd = NULL;
873         element = rx_local->RecvList.next;
874         rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
875
876         if (rfd == NULL) {
877                 spin_unlock_irqrestore(&etdev->RcvLock, flags);
878                 return NULL;
879         }
880
881         list_del(&rfd->list_node);
882         rx_local->nReadyRecv--;
883
884         spin_unlock_irqrestore(&etdev->RcvLock, flags);
885
886         rfd->bufferindex = bindex;
887         rfd->ringindex = rindex;
888
889         /* In V1 silicon, there is a bug which screws up filtering of
890          * runt packets.  Therefore runt packet filtering is disabled
891          * in the MAC and the packets are dropped here.  They are
892          * also counted here.
893          */
894         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
895                 etdev->Stats.other_errors++;
896                 len = 0;
897         }
898
899         if (len) {
900                 if (etdev->ReplicaPhyLoopbk == 1) {
901                         buf = rx_local->Fbr[rindex]->Va[bindex];
902
903                         if (memcmp(&buf[6], &etdev->CurrentAddress[0],
904                                    ETH_ALEN) == 0) {
905                                 if (memcmp(&buf[42], "Replica packet",
906                                            ETH_HLEN)) {
907                                         etdev->ReplicaPhyLoopbkPF = 1;
908                                 }
909                         }
910                 }
911
912                 /* Determine if this is a multicast packet coming in */
913                 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
914                     !(Word0.value & ALCATEL_BROADCAST_PKT)) {
915                         /* Promiscuous mode and Multicast mode are
916                          * not mutually exclusive as was first
917                          * thought.  I guess Promiscuous is just
918                          * considered a super-set of the other
919                          * filters. Generally filter is 0x2b when in
920                          * promiscuous mode.
921                          */
922                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
923                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
924                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
925                                 buf = rx_local->Fbr[rindex]->
926                                                 Va[bindex];
927
928                                 /* Loop through our list to see if the
929                                  * destination address of this packet
930                                  * matches one in our list.
931                                  */
932                                 for (i = 0;
933                                      i < etdev->MCAddressCount;
934                                      i++) {
935                                         if (buf[0] ==
936                                             etdev->MCList[i][0]
937                                             && buf[1] ==
938                                             etdev->MCList[i][1]
939                                             && buf[2] ==
940                                             etdev->MCList[i][2]
941                                             && buf[3] ==
942                                             etdev->MCList[i][3]
943                                             && buf[4] ==
944                                             etdev->MCList[i][4]
945                                             && buf[5] ==
946                                             etdev->MCList[i][5]) {
947                                                 break;
948                                         }
949                                 }
950
951                                 /* If our index is equal to the number
952                                  * of Multicast address we have, then
953                                  * this means we did not find this
954                                  * packet's matching address in our
955                                  * list.  Set the PacketSize to zero,
956                                  * so we free our RFD when we return
957                                  * from this function.
958                                  */
959                                 if (i == etdev->MCAddressCount)
960                                         len = 0;
961                         }
962
963                         if (len > 0)
964                                 etdev->Stats.multircv++;
965                 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
966                         etdev->Stats.brdcstrcv++;
967                 else
968                         /* Not sure what this counter measures in
969                          * promiscuous mode. Perhaps we should check
970                          * the MAC address to see if it is directed
971                          * to us in promiscuous mode.
972                          */
973                         etdev->Stats.unircv++;
974         }
975
976         if (len > 0) {
977                 struct sk_buff *skb = NULL;
978
979                 /* rfd->PacketSize = len - 4; */
980                 rfd->PacketSize = len;
981
982                 skb = dev_alloc_skb(rfd->PacketSize + 2);
983                 if (!skb) {
984                         dev_err(&etdev->pdev->dev,
985                                   "Couldn't alloc an SKB for Rx\n");
986                         return NULL;
987                 }
988
989                 etdev->net_stats.rx_bytes += rfd->PacketSize;
990
991                 memcpy(skb_put(skb, rfd->PacketSize),
992                        rx_local->Fbr[rindex]->Va[bindex],
993                        rfd->PacketSize);
994
995                 skb->dev = etdev->netdev;
996                 skb->protocol = eth_type_trans(skb, etdev->netdev);
997                 skb->ip_summed = CHECKSUM_NONE;
998
999                 netif_rx(skb);
1000         } else {
1001                 rfd->PacketSize = 0;
1002         }
1003
1004         nic_return_rfd(etdev, rfd);
1005         return rfd;
1006 }
1007
/**
 * et131x_reset_recv - Reset the receive list
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 *
 * NOTE(review): as written this only sanity-checks that the free RFD
 * list is non-empty; any historical reset logic appears to have been
 * removed -- confirm callers do not expect more than this check.
 */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
	/* Warn if no RFDs have been returned to the free list */
	WARN_ON(list_empty(&etdev->RxRing.RecvList));

}
1019
1020 /**
1021  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1022  * @etdev: pointer to our adapter
1023  *
1024  * Assumption, Rcv spinlock has been acquired.
1025  */
1026 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1027 {
1028         PMP_RFD rfd = NULL;
1029         u32 count = 0;
1030         bool done = true;
1031
1032         /* Process up to available RFD's */
1033         while (count < NUM_PACKETS_HANDLED) {
1034                 if (list_empty(&etdev->RxRing.RecvList)) {
1035                         WARN_ON(etdev->RxRing.nReadyRecv != 0);
1036                         done = false;
1037                         break;
1038                 }
1039
1040                 rfd = nic_rx_pkts(etdev);
1041
1042                 if (rfd == NULL)
1043                         break;
1044
1045                 /* Do not receive any packets until a filter has been set.
1046                  * Do not receive any packets until we have link.
1047                  * If length is zero, return the RFD in order to advance the
1048                  * Free buffer ring.
1049                  */
1050                 if (!etdev->PacketFilter ||
1051                     !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1052                     rfd->PacketSize == 0) {
1053                         continue;
1054                 }
1055
1056                 /* Increment the number of packets we received */
1057                 etdev->Stats.ipackets++;
1058
1059                 /* Set the status on the packet, either resources or success */
1060                 if (etdev->RxRing.nReadyRecv < RFD_LOW_WATER_MARK) {
1061                         dev_warn(&etdev->pdev->dev,
1062                                     "RFD's are running out\n");
1063                 }
1064                 count++;
1065         }
1066
1067         if (count == NUM_PACKETS_HANDLED || !done) {
1068                 etdev->RxRing.UnfinishedReceives = true;
1069                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1070                        &etdev->regs->global.watchdog_timer);
1071         } else
1072                 /* Watchdog timer will disable itself if appropriate. */
1073                 etdev->RxRing.UnfinishedReceives = false;
1074 }
1075
1076 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1077 {
1078         u32 v = *fbr;
1079         v++;
1080         /* This works for all cases where limit < 1024. The 1023 case
1081            works because 1023++ is 1024 which means the if condition is not
1082            taken but the carry of the bit into the wrap bit toggles the wrap
1083            value correctly */
1084         if ((v & ET_DMA10_MASK) > limit) {
1085                 v &= ~ET_DMA10_MASK;
1086                 v ^= ET_DMA10_WRAP;
1087         }
1088         /* For the 1023 case */
1089         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1090         *fbr = v;
1091         return v;
1092 }
1093
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 */
1099 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
1100 {
1101         struct _rx_ring_t *rx_local = &etdev->RxRing;
1102         struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
1103         uint16_t bi = rfd->bufferindex;
1104         uint8_t ri = rfd->ringindex;
1105         unsigned long flags;
1106
1107         /* We don't use any of the OOB data besides status. Otherwise, we
1108          * need to clean up OOB data
1109          */
1110         if (
1111 #ifdef USE_FBR0
1112             (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
1113 #endif
1114             (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
1115                 spin_lock_irqsave(&etdev->FbrLock, flags);
1116
1117                 if (ri == 1) {
1118                         struct fbr_desc *next =
1119                             (struct fbr_desc *) (rx_local->pFbr1RingVa) +
1120                                             INDEX10(rx_local->local_Fbr1_full);
1121
1122                         /* Handle the Free Buffer Ring advancement here. Write
1123                          * the PA / Buffer Index for the returned buffer into
1124                          * the oldest (next to be freed)FBR entry
1125                          */
1126                         next->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
1127                         next->addr_lo = rx_local->Fbr[1]->PALow[bi];
1128                         next->word2 = bi;
1129
1130                         writel(bump_fbr(&rx_local->local_Fbr1_full,
1131                                 rx_local->Fbr1NumEntries - 1),
1132                                 &rx_dma->fbr1_full_offset);
1133                 }
1134 #ifdef USE_FBR0
1135                 else {
1136                         struct fbr_desc *next = (struct fbr_desc *)
1137                                 rx_local->pFbr0RingVa +
1138                                         INDEX10(rx_local->local_Fbr0_full);
1139
1140                         /* Handle the Free Buffer Ring advancement here. Write
1141                          * the PA / Buffer Index for the returned buffer into
1142                          * the oldest (next to be freed) FBR entry
1143                          */
1144                         next->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
1145                         next->addr_lo = rx_local->Fbr[0]->PALow[bi];
1146                         next->word2 = bi;
1147
1148                         writel(bump_fbr(&rx_local->local_Fbr0_full,
1149                                         rx_local->Fbr0NumEntries - 1),
1150                                &rx_dma->fbr0_full_offset);
1151                 }
1152 #endif
1153                 spin_unlock_irqrestore(&etdev->FbrLock, flags);
1154         } else {
1155                 dev_err(&etdev->pdev->dev,
1156                           "NICReturnRFD illegal Buffer Index returned\n");
1157         }
1158
1159         /* The processing on this RFD is done, so put it back on the tail of
1160          * our list
1161          */
1162         spin_lock_irqsave(&etdev->RcvLock, flags);
1163         list_add_tail(&rfd->list_node, &rx_local->RecvList);
1164         rx_local->nReadyRecv++;
1165         spin_unlock_irqrestore(&etdev->RcvLock, flags);
1166
1167         WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
1168 }