/*
  drivers/net/tulip/interrupt.c

  Maintained by Valerie Henson <val_henson@linux.intel.com>
  Copyright 2000,2001 The Linux Kernel Team
  Written/copyright 1994-2001 by Donald Becker.

  This software may be used and distributed according to the terms
  of the GNU General Public License, incorporated herein by reference.

  Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
  for more information on this driver, or visit the project
  Web page at http://sourceforge.net/projects/tulip/
*/
#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
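/* Driver-wide tunables shared with the rest of the tulip driver:
 * tulip_rx_copybreak is the size threshold below which received packets are
 * copied into a freshly allocated skb, and tulip_max_interrupt_work bounds
 * how much work a single interrupt may do.  (Descriptive comment added;
 * both values are presumably set elsewhere, e.g. from module parameters.) */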
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
/* CSR11 21143 hardware Mitigation Control Interrupt
   We use only RX mitigation; other techniques are used for
   TX interrupt mitigation.

   31    Cycle Size (timer control)
   30:27 TX timer in 16 * Cycle size
   26:24 TX No pkts before Int.
   23:20 RX timer in Cycle size
   19:17 RX No pkts before Int.
   16    Continuous Mode (CM)
*/
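/* Worked example (assuming the field layout above): the entry 0x80150000
 * used below sets bit 31 (cycle-size select), RX timer field 23:20 = 1,
 * RX packet-count field 19:17 = 2 and CM bit 16 = 1, i.e.
 * "RX time = 1, RX pkts = 2, CM = 1". */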
0x0,        /* IM disabled */
0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
// 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
0x80F10000  /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
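/* Replenish empty Rx ring slots with newly allocated, DMA-mapped skbs and
 * hand the descriptors back to the chip; called from both the receive path
 * and the interrupt handler.  (Descriptive comment added.) */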
int tulip_refill_rx(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);

/* Refill the Rx ring buffers. */
for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {

skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);

mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
tp->rx_buffers[entry].mapping = mapping;

skb->dev = dev; /* Mark as being used by this device. */
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);

tp->rx_ring[entry].status = cpu_to_le32(DescOwned);

if (tp->chip_id == LC82C168) {
if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
/* Rx stopped due to out of buffers, restart it. */
iowrite32(0x01, tp->base_addr + CSR2);
#ifdef CONFIG_TULIP_NAPI
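/* Out-of-memory timer callback: when the poll loop could not refill the Rx
 * ring (skb allocation failed), polling is stopped and this timer is armed;
 * when it fires we simply reschedule NAPI polling and try again.
 * (Descriptive comment added.) */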
void oom_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct tulip_private *tp = netdev_priv(dev);
netif_rx_schedule(dev, &tp->napi);
}
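/* NAPI poll callback: ack the Rx interrupt sources, pull up to 'budget'
 * packets off the Rx ring, refill the ring, adjust hardware interrupt
 * mitigation, and re-enable Rx interrupts once the ring has been drained.
 * (Descriptive comment added.) */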
int tulip_poll(struct napi_struct *napi, int budget)
{
struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
struct net_device *dev = tp->dev;
int entry = tp->cur_rx % RX_RING_SIZE;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

if (!netif_running(dev))

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* That one buffer is needed for mit activation, or it might be a
   bug in the ring buffer code; check later -- JHS */

if (budget >= RX_RING_SIZE) budget--;

printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
tp->rx_ring[entry].status);
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");

/* Acknowledge current RX interrupt sources. */
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);

if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)

printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);

if (++work_done >= budget)

if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
printk(KERN_WARNING "%s: Oversized Ethernet frame "
"spanned multiple buffers, status %8.8x!\n",
dev->name, status);
tp->stats.rx_length_errors++;
} else if (status & RxDescFatalErr) {
/* There was a fatal error. */
printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet. */
if (status & 0x0890) tp->stats.rx_length_errors++;
if (status & 0x0004) tp->stats.rx_frame_errors++;
if (status & 0x0002) tp->stats.rx_crc_errors++;
if (status & 0x0001) tp->stats.rx_fifo_errors++;
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
dev->name, pkt_len, pkt_len);
tp->stats.rx_length_errors++;

/* Check if the packet is long enough to accept without copying
   to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak
    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
skb_put(skb, pkt_len);

memcpy(skb_put(skb, pkt_len),
tp->rx_buffers[entry].skb->data,
pkt_len);

pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);

#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
"do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
dev->name,
le32_to_cpu(tp->rx_ring[entry].buffer1),
(unsigned long long)tp->rx_buffers[entry].mapping,
skb->head, temp);

pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
skb->protocol = eth_type_trans(skb, dev);

netif_receive_skb(skb);

dev->last_rx = jiffies;
tp->stats.rx_packets++;
tp->stats.rx_bytes += pkt_len;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

entry = (++tp->cur_rx) % RX_RING_SIZE;
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
tulip_refill_rx(dev);
/* New ack strategy... irq does not ack Rx any longer,
   hopefully this helps */

/* Really bad things can happen here... If a new packet arrives
 * and an irq arrives (tx or just due to an occasionally unset
 * mask), it will be acked by the irq handler, but the new poll thread
 * is not scheduled. It is a major hole in the design.
 * No idea how to fix this if "playing with fire" fails
 * tomorrow (night 011029). If it does not fail, we have finally won:
 * the amount of IO did not increase at all. */
} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* We use this simplistic scheme for IM. It's proven by
   real-life installations. We could have IM enabled
   continuously, but this would cause unnecessary latency.
   Unfortunately we can't use all the NET_RX_* feedback here.
   That would turn on IM for devices that are not contributing
   to backlog congestion, adding unnecessary latency.

   We monitor the device RX ring and have:

   HW Interrupt Mitigation either ON or OFF.

   ON:  more than 1 pkt received (per intr.) OR we are dropping
   OFF: only 1 pkt received

   Note: we only use the min and max (0, 15) settings from mit_table */
if (tp->flags & HAS_INTR_MITIGATION) {
iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);

iowrite32(0, tp->base_addr + CSR11);
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

tulip_refill_rx(dev);

/* If the RX ring is not full, we are out of memory. */
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)

/* Remove us from the polling list and enable RX intr. */
netif_rx_complete(dev, napi);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
/* The last op happens after poll completion, which means the following:
 * 1. it can race with disabling irqs in the irq handler
 * 2. it can race with disabling/enabling irqs in other poll threads
 * 3. if an irq is raised after the loop begins, it will be immediately
 *
 * Summarizing: the logic results in some redundant irqs, both
 * due to races in masking and due to too-late acking of already
 * processed irqs. But it must not result in losing events. */
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
tulip_refill_rx(dev);

if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
oom: /* Executed with RX ints disabled */

/* Start the timer, stop polling, but do not enable rx interrupts. */
mod_timer(&tp->oom_timer, jiffies + 1);

/* Think: timer_pending() was an explicit signature of a bug.
 * The timer can be pending now but have fired and completed
 * before we did netif_rx_complete(). See? We would lose it. */

/* remove ourselves from the polling list */
netif_rx_complete(dev, napi);
#else /* CONFIG_TULIP_NAPI */
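/* Non-NAPI receive path: walk the Rx ring, handing completed packets up to
 * the stack until we hit a descriptor the chip still owns or exhaust the
 * work limit.  (Descriptive comment added.) */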
static int tulip_rx(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int entry = tp->cur_rx % RX_RING_SIZE;
int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;

printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);

printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);
if (--rx_work_limit < 0)

if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
printk(KERN_WARNING "%s: Oversized Ethernet frame "
"spanned multiple buffers, status %8.8x!\n",
dev->name, status);
tp->stats.rx_length_errors++;
} else if (status & RxDescFatalErr) {
/* There was a fatal error. */
printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet. */
if (status & 0x0890) tp->stats.rx_length_errors++;
if (status & 0x0004) tp->stats.rx_frame_errors++;
if (status & 0x0002) tp->stats.rx_crc_errors++;
if (status & 0x0001) tp->stats.rx_fifo_errors++;
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
dev->name, pkt_len, pkt_len);
tp->stats.rx_length_errors++;

/* Check if the packet is long enough to accept without copying
   to a minimally-sized skbuff. */
if (pkt_len < tulip_rx_copybreak
    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
pci_dma_sync_single_for_cpu(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
skb_put(skb, pkt_len);

memcpy(skb_put(skb, pkt_len),
tp->rx_buffers[entry].skb->data,
pkt_len);

pci_dma_sync_single_for_device(tp->pdev,
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);

#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
"do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
dev->name,
le32_to_cpu(tp->rx_ring[entry].buffer1),
(long long)tp->rx_buffers[entry].mapping,
skb->head, temp);

pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
skb->protocol = eth_type_trans(skb, dev);

dev->last_rx = jiffies;
tp->stats.rx_packets++;
tp->stats.rx_bytes += pkt_len;

entry = (++tp->cur_rx) % RX_RING_SIZE;
#endif /* CONFIG_TULIP_NAPI */
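/* Handle an interrupt raised by the PHY: compare CSR12 against the cached
 * shadow value, and on a change ack the bit, re-check the duplex setting
 * under the driver lock, and report the event to the caller.
 * (Descriptive comment added.) */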
static inline unsigned int phy_interrupt (struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

if (csr12 != tp->csr12_shadow) {
iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
tp->csr12_shadow = csr12;
/* do link change stuff */
spin_lock(&tp->lock);
tulip_check_duplex(dev);
spin_unlock(&tp->lock);
/* clear irq ack bit */
iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *)dev_instance;
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;

int maxrx = RX_RING_SIZE;
int maxtx = TX_RING_SIZE;
int maxoi = TX_RING_SIZE;
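/* Upper bounds on how many Rx, Tx and other events a single invocation of
 * the handler will process before the "too much work" path further down
 * bails out.  (Descriptive comment added.) */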
#ifdef CONFIG_TULIP_NAPI

unsigned int work_count = tulip_max_interrupt_work;
unsigned int handled = 0;

/* Let's see whether the interrupt really is for us */
csr5 = ioread32(ioaddr + CSR5);

if (tp->flags & HAS_PHY_IRQ)
handled = phy_interrupt (dev);

if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
return IRQ_RETVAL(handled);
#ifdef CONFIG_TULIP_NAPI

if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {

/* Mask RX intrs and add the device to the poll list. */
iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
netif_rx_schedule(dev, &tp->napi);

if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))

/* Acknowledge the interrupt sources we handle here ASAP;
   the poll function does the Rx and RxNoBuf acking. */
iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

/* Acknowledge all of the current interrupt sources ASAP. */
iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

if (csr5 & (RxIntr | RxNoBuf)) {
tulip_refill_rx(dev);

#endif /* CONFIG_TULIP_NAPI */
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
dev->name, csr5, ioread32(ioaddr + CSR5));

if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
unsigned int dirty_tx;

spin_lock(&tp->lock);
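/* Scan the Tx ring between dirty_tx and cur_tx: unmap and free buffers the
 * chip has finished with, update the error and byte/packet counters, and
 * skip the unmapped dummy descriptors used for setup frames.
 * (Descriptive comment added.) */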
for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
     dirty_tx++) {
int entry = dirty_tx % TX_RING_SIZE;
int status = le32_to_cpu(tp->tx_ring[entry].status);

break; /* It still has not been Txed */

/* Check for Rx filter setup frames. */
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames are not mapped */
if (tp->tx_buffers[entry].mapping)
pci_unmap_single(tp->pdev,
tp->tx_buffers[entry].mapping,
sizeof(tp->setup_frame),
PCI_DMA_TODEVICE);
if (status & 0x8000) {
/* There was a major error; log it. */
#ifndef final_version
printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
dev->name, status);
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
if (status & 0x0C00) tp->stats.tx_carrier_errors++;
if (status & 0x0200) tp->stats.tx_window_errors++;
if (status & 0x0002) tp->stats.tx_fifo_errors++;
if ((status & 0x0080) && tp->full_duplex == 0)
tp->stats.tx_heartbeat_errors++;

tp->stats.tx_bytes +=
tp->tx_buffers[entry].skb->len;
tp->stats.collisions += (status >> 3) & 15;
tp->stats.tx_packets++;
pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len,
PCI_DMA_TODEVICE);

/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping = 0;
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
dev->name, dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;

if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
netif_wake_queue(dev);

tp->dirty_tx = dirty_tx;
printk(KERN_WARNING "%s: The transmitter stopped."
" CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
tulip_restart_rxtx(tp);

spin_unlock(&tp->lock);
if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
if (csr5 == 0xffffffff)

if (csr5 & TxJabber) tp->stats.tx_errors++;
if (csr5 & TxFIFOUnderflow) {
if ((tp->csr6 & 0xC000) != 0xC000)
tp->csr6 += 0x4000; /* Bump up the Tx threshold */

tp->csr6 |= 0x00200000; /* Store-n-forward. */
/* Restart the transmit process. */
tulip_restart_rxtx(tp);
iowrite32(0, ioaddr + CSR1);
if (csr5 & (RxDied | RxNoBuf)) {
if (tp->flags & COMET_MAC_ADDR) {
iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
iowrite32(tp->mc_filter[1], ioaddr + 0xB0);

if (csr5 & RxDied) { /* Missed a Rx frame. */
tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
tp->stats.rx_errors++;
tulip_start_rxtx(tp);
/*
 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
 * call is ever done under the spinlock.
 */
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
(tp->link_change)(dev, csr5);
if (csr5 & SystemError) {
int error = (csr5 >> 23) & 7;
/* oops, we hit a PCI error. The code produced corresponds
 *
 * Note that on parity error, we should do a software reset
 * of the chip to get it back into a sane state (according
 * to the 21142/3 docs, that is).
 */
printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
dev->name, tp->nir, error);

/* Clear all error sources, including undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
if (csr5 & TimerInt) {

printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",

iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);

if (tx > maxtx || rx > maxrx || oi > maxoi) {
printk(KERN_WARNING "%s: Too much work during an interrupt, "
"csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
if (tp->flags & HAS_INTR_MITIGATION) {
/* Josip Loncaric at ICASE did extensive experimentation
   to develop a good interrupt mitigation setting. */
iowrite32(0x8b240000, ioaddr + CSR11);
} else if (tp->chip_id == LC82C168) {
/* the LC82C168 doesn't have a hw timer. */
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));

/* Mask all interrupting sources, set timer to re-enable. */
iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
iowrite32(0x0012, ioaddr + CSR11);
csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
} while ((csr5 & (TxNoBuf |
SystemError )) != 0);
#else
} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
tulip_refill_rx(dev);

/* check if the card is in suspend mode */
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
if (tp->chip_id == LC82C168) {
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));

if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
ioaddr + CSR7);
iowrite32(TimerInt, ioaddr + CSR5);
iowrite32(12, ioaddr + CSR11);

#endif /* CONFIG_TULIP_NAPI */
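/* CSR8 accumulates the chip's missed-frame count; fold it into the
 * dropped-packet statistic before leaving the handler.
 * (Descriptive comment added.) */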
if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;

printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
dev->name, ioread32(ioaddr + CSR5));