/*
        drivers/net/tulip/interrupt.c

        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
        for more information on this driver.
        Please submit bugs to http://bugzilla.kernel.org/ .

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; other techniques are used
            for TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No. pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No. pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
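
/*
 * Illustrative sketch only (not used by the driver): how a CSR11
 * mitigation word could be assembled from the bit fields documented
 * above.  The helper name is hypothetical.
 *
 *      mit_csr11_word(1, 0, 0, 1, 2, 1) == 0x80150000, i.e. the first
 *      enabled table entry (RX time = 1, RX pkts = 2, CM = 1).
 */
static inline u32 mit_csr11_word(u32 cycle, u32 tx_timer, u32 tx_pkts,
                                 u32 rx_timer, u32 rx_pkts, u32 cm)
{
        return (cycle << 31) | (tx_timer << 27) | (tx_pkts << 24) |
               (rx_timer << 20) | (rx_pkts << 17) | (cm << 16);
}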
#endif


int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}
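
/*
 * A brief sketch of the descriptor ownership protocol assumed above:
 * the driver fills buffer1 with a DMA address and then sets DescOwned
 * in the status word to hand the descriptor to the chip; the chip
 * clears DescOwned once it has written a received frame, e.g.:
 *
 *      while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
 *              ... process the frame, refill, advance entry ...
 *      }
 *
 * which is exactly the consumer loop used by tulip_poll()/tulip_rx()
 * below.
 */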

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        napi_schedule(&tp->napi);
}
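
/*
 * Note: oom_timer() only fires if tp->oom_timer was armed; the setup
 * presumably lives in the device-open path in tulip_core.c, along the
 * lines of (sketch, not verified here):
 *
 *      init_timer(&tp->oom_timer);
 *      tp->oom_timer.function = oom_timer;
 *      tp->oom_timer.data = (unsigned long)dev;
 */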

int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* One buffer is needed for mitigation activation -- or this might be a
   bug in the ring buffer code; check later. -- JHS */

        if (budget >= RX_RING_SIZE) budget--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                           tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                        short pkt_len;

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;

                        /*
                         * Omit the four octet CRC from the length.
                         * (May not be considered valid until we have
                         * checked status for RxLengthOver2047 bits)
                         */
                        pkt_len = ((status >> 16) & 0x7ff) - 4;

                        /*
                         * Maximum pkt_len is 1518 (1514 + vlan header)
                         * Anything higher than this is always invalid
                         * regardless of RxLengthOver2047 bits
                         */

                        if ((status & (RxLengthOver2047 |
                                       RxDescCRCError |
                                       RxDescCollisionSeen |
                                       RxDescRunt |
                                       RxDescDescErr |
                                       RxWholePkt)) != RxWholePkt ||
                            pkt_len > 1518) {
                                if ((status & (RxLengthOver2047 |
                                               RxWholePkt)) != RxWholePkt) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                               "spanned multiple buffers, status %8.8x!\n",
                                                               dev->name, status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet.*/
                                        if (pkt_len > 1518 ||
                                            (status & RxDescRunt))
                                                tp->stats.rx_length_errors++;

                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                struct sk_buff *skb;

                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak &&
                                    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                         pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                       dev->name,
                                                       le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                       (unsigned long long)tp->rx_buffers[entry].mapping,
                                                       skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy... the irq handler no longer acks Rx;
                   hopefully this helps. */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx, or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but the new
                 * thread is not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we have
                 * finally won: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: We only use the min and max (0, 15) settings from mit_table */


        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        napi_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

        /* The last op happens after poll completion, which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with disabling/enabling irqs in other poll threads
         * 3. if an irq was raised after the loop began, it will be immediately
         *    triggered here.
         *
         * Summarizing: the logic results in some redundant irqs, both
         * due to races in masking and due to too-late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

 not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

 oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies + 1);

        /* Think: using timer_pending() here was an explicit signature of a bug.
         * The timer can be pending now, but have fired and completed
         * before we did napi_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        napi_complete(napi);

        return work_done;
}
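
/*
 * For context (a sketch, mirroring what the device-setup code in
 * tulip_core.c is expected to do): tulip_poll() is registered as the
 * NAPI handler with something like
 *
 *      netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 *
 * and the irq handler below then only schedules the poll via
 * napi_schedule() once RX interrupts have been masked.
 */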

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                           tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                short pkt_len;

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                   dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;

                /*
                  Omit the four octet CRC from the length.
                  (May not be considered valid until we have
                  checked status for RxLengthOver2047 bits)
                */
                pkt_len = ((status >> 16) & 0x7ff) - 4;
                /*
                  Maximum pkt_len is 1518 (1514 + vlan header)
                  Anything higher than this is always invalid
                  regardless of RxLengthOver2047 bits
                */

                if ((status & (RxLengthOver2047 |
                               RxDescCRCError |
                               RxDescCollisionSeen |
                               RxDescRunt |
                               RxDescDescErr |
                               RxWholePkt)) != RxWholePkt ||
                    pkt_len > 1518) {
                        if ((status & (RxLengthOver2047 |
                                       RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                           "spanned multiple buffers, status %8.8x!\n",
                                                           dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                   dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (pkt_len > 1518 ||
                                    (status & RxDescRunt))
                                        tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        struct sk_buff *skb;

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                 pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               (unsigned long long)tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to the poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
                        napi_schedule(&tp->napi);

                        if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking. */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));


                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                                 dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                         tp->tx_buffers[entry].mapping,
                                                         sizeof(tp->setup_frame),
                                                         PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                           dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                           dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                                   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                                   dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                        dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                           dev->name, csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                           "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
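                                /* (Per the CSR11 field layout documented with
                                   mit_table above, 0x8b240000 would decode to
                                   TX timer 1, TX pkts 3, RX timer 2, RX pkts 2,
                                   CM off -- an assumed reading, for reference.) */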
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer.*/
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError )) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                        ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

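        /* CSR8 keeps a missed-frame count in its low 16 bits; bit 16 is
           (per the 21143-style counters, an assumed reading) the overflow
           indicator, which is why an overflowed count is clamped to
           0x10000 below. */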
        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                           dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}