/*
        drivers/net/tulip/interrupt.c

        Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
        for more information on this driver, or visit the project
        Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11: 21143 hardware Mitigation Control Interrupt.
            We use only RX mitigation; other techniques are used for
            TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
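
/* A minimal sketch (added for illustration, not used by the driver) of
 * how the CSR11 bit fields documented above combine into one mitigation
 * word.  The shift values follow the field layout in the comment; treat
 * this as an illustration of the encoding, not a verified register map. */
static inline u32 tulip_mit_word(u32 cycle_size, u32 tx_timer, u32 tx_pkts,
                                 u32 rx_timer, u32 rx_pkts, u32 cm)
{
        return (cycle_size << 31) | (tx_timer << 27) | (tx_pkts << 24) |
               (rx_timer << 20) | (rx_pkts << 17) | (cm << 16);
}
/* E.g. tulip_mit_word(1, 0, 0, 1, 2, 1) == 0x80150000, the second table
 * entry above ("RX time = 1, RX pkts = 2, CM = 1"). */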

int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to running out of buffers;
                         * restart it.
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}
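
/* Note on the bookkeeping above: cur_rx and dirty_rx are free-running
 * counters, so the producer/consumer distance is simply cur_rx - dirty_rx
 * (correct even across unsigned wraparound), and a counter maps to a ring
 * slot via index % RX_RING_SIZE.  A minimal illustrative helper (an
 * addition of this edit, not called by the driver): */
static inline int tulip_rx_slots_outstanding(unsigned int cur_rx,
                                             unsigned int dirty_rx)
{
        /* Descriptors consumed by the CPU but not yet refilled and
         * handed back to the chip; never exceeds RX_RING_SIZE. */
        return cur_rx - dirty_rx;
}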

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        netif_rx_schedule(dev, &tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

        if (!netif_running(dev))
                goto done;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* One buffer is needed for mit activation; or it might be a
           bug in the ring buffer code; check later -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;
                        if ((status & 0x38008300) != 0x0300) {
                                if ((status & 0x38000300) != 0x0300) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                               "spanned multiple buffers, status %8.8x!\n",
                                                               dev->name, status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else if (status & RxDescFatalErr) {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet. */
                                        if (status & 0x0890) tp->stats.rx_length_errors++;
                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                /* Omit the four octet CRC from the length. */
                                short pkt_len = ((status >> 16) & 0x7ff) - 4;
                                struct sk_buff *skb;

#ifndef final_version
                                if (pkt_len > 1518) {
                                        printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                               dev->name, pkt_len, pkt_len);
                                        pkt_len = 1518;
                                        tp->stats.rx_length_errors++;
                                }
#endif
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak
                                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                       dev->name,
                                                       le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                       (unsigned long long)tp->rx_buffers[entry].mapping,
                                                       skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                dev->last_rx = jiffies;
                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy: the irq handler no longer acks Rx;
                   hopefully this helps. */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but a new poll
                 * is not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we have won,
                 * finally: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table. */


        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        netif_rx_complete(dev, napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with dis-/enabling irqs in other poll threads
         * 3. if an irq was raised after the loop began, it will be
         *    immediately triggered here.
         *
         * Summarizing: the logic results in some redundant irqs, both
         * due to races in masking and due to too-late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies + 1);

        /* Think: checking timer_pending() here would be an explicit
         * signature of a bug. The timer can be pending now, yet have fired
         * and completed before we did netif_rx_complete(). See? We would
         * lose it. */

        /* Remove ourselves from the polling list. */
        netif_rx_complete(dev, napi);

        return work_done;
}
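
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* The mitigation policy implemented in tulip_poll() above, reduced to
 * its decision rule: program CSR11 with the maximum mit_table setting
 * when a poll handled more than one packet, otherwise disable IM.
 * A minimal sketch for illustration; this helper is an addition of
 * this edit and is not called by the driver. */
static inline u32 tulip_mit_csr11_setting(int received)
{
        return received > 1 ? mit_table[MIT_TABLE] : 0;
}
#endif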

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                               dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                       "spanned multiple buffers, status %8.8x!\n",
                                                       dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                               dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet. */
                                if (status & 0x0890) tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = ((status >> 16) & 0x7ff) - 4;
                        struct sk_buff *skb;

#ifndef final_version
                        if (pkt_len > 1518) {
                                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                       dev->name, pkt_len, pkt_len);
                                pkt_len = 1518;
                                tp->stats.rx_length_errors++;
                        }
#endif

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               (unsigned long long)tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        dev->last_rx = jiffies;
                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */
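
/* Both receive paths above use the same copybreak heuristic: packets
 * shorter than tulip_rx_copybreak are copied into a freshly allocated,
 * right-sized skb so the full PKT_BUF_SZ ring buffer can be handed
 * straight back to the hardware, while longer packets donate their ring
 * skb and force a fresh ring allocation instead.  A minimal sketch of
 * the decision (an illustration added by this edit, not driver code): */
static inline int tulip_rx_would_copy(int pkt_len)
{
        return pkt_len < tulip_rx_copybreak;
}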

static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to the poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
                                  ioaddr + CSR7);
                        netif_rx_schedule(dev, &tp->napi);

                        if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does Rx and RxNoBuf acking. */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /* CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));


                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames are not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                       dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                       dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                               "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                               dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock.
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* Oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs, that is).
                                 *   -- rmk
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                       dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                       dev->name, csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
                                       dev->name, csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError)) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) "
                               "(tp->cur_rx = %u, ttimer = %d, rx = %d) "
                               "go/stay in suspend mode\n",
                               dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: "
                                               "(%lu) set timer\n", dev->name, tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                       dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}
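
/* Illustrative decode of the CSR5 System Error code (bits 25:23) logged
 * in the handler above; the three defined causes follow the comment
 * there.  This helper is an addition of this edit and is not called
 * anywhere in the driver. */
static inline const char *tulip_system_error_name(int csr5)
{
        switch ((csr5 >> 23) & 7) {
        case 0:  return "parity error";
        case 1:  return "master abort";
        case 2:  return "target abort";
        default: return "reserved";
        }
}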