]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/staging/comedi/drivers/mite.c
Merge branch 'cec-defines' into for-linus
[karo-tx-linux.git] / drivers / staging / comedi / drivers / mite.c
1 /*
2  * comedi/drivers/mite.c
3  * Hardware driver for NI Mite PCI interface chip
4  *
5  * COMEDI - Linux Control and Measurement Device Interface
6  * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18
19 /*
20  * The PCI-MIO E series driver was originally written by
21  * Tomasz Motylewski <...>, and ported to comedi by ds.
22  *
23  * References for specifications:
24  *
25  *    321747b.pdf  Register Level Programmer Manual (obsolete)
26  *    321747c.pdf  Register Level Programmer Manual (new)
27  *    DAQ-STC reference manual
28  *
29  * Other possibly relevant info:
30  *
31  *    320517c.pdf  User manual (obsolete)
32  *    320517f.pdf  User manual (new)
33  *    320889a.pdf  delete
34  *    320906c.pdf  maximum signal ratings
35  *    321066a.pdf  about 16x
36  *    321791a.pdf  discontinuation of at-mio-16e-10 rev. c
37  *    321808a.pdf  about at-mio-16e-10 rev P
38  *    321837a.pdf  discontinuation of at-mio-16de-10 rev d
39  *    321838a.pdf  about at-mio-16de-10 rev N
40  *
41  * ISSUES:
42  *
43  */
44
45 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
47 #include <linux/module.h>
48 #include <linux/slab.h>
49
50 #include "../comedi_pci.h"
51
52 #include "mite.h"
53
/*
 * Address of the last byte of the page containing x (x | the complement
 * of PAGE_MASK).  Appears unused in this file — presumably kept for
 * users of mite.h; TODO confirm before removing.
 */
#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
55
56 struct mite_struct *mite_alloc(struct pci_dev *pcidev)
57 {
58         struct mite_struct *mite;
59         unsigned int i;
60
61         mite = kzalloc(sizeof(*mite), GFP_KERNEL);
62         if (mite) {
63                 spin_lock_init(&mite->lock);
64                 mite->pcidev = pcidev;
65                 for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
66                         mite->channels[i].mite = mite;
67                         mite->channels[i].channel = i;
68                         mite->channels[i].done = 1;
69                 }
70         }
71         return mite;
72 }
73 EXPORT_SYMBOL_GPL(mite_alloc);
74
75 static void dump_chip_signature(u32 csigr_bits)
76 {
77         pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
78                 mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
79                 mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
80         pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
81                 mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
82                 mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
83 }
84
85 static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
86 {
87         unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
88         unsigned empty_count = (fcr_bits >> 16) & 0xff;
89         unsigned full_count = fcr_bits & 0xff;
90
91         return empty_count + full_count;
92 }
93
/*
 * mite_setup2() - map BARs and initialize a MITE interface chip.
 * @dev: comedi device; dev->mmio is set to the remapped daq window (BAR 1).
 * @mite: mite private data previously allocated with mite_alloc().
 * @use_win1: program I/O window 1 instead of window 0 for the daq window.
 *
 * Maps BAR 0 (mite registers) and BAR 1 (daq registers), programs the
 * chosen I/O window, enables dma bursts, then resets every dma channel
 * and disables its interrupts.
 *
 * Returns 0 on success or -ENOMEM if an ioremap fails.  Mappings already
 * made are not unwound here; mite_detach() unmaps the mite window.
 */
int mite_setup2(struct comedi_device *dev,
		struct mite_struct *mite, bool use_win1)
{
	unsigned long length;
	int i;
	u32 csigr_bits;
	unsigned unknown_dma_burst_bits;

	pci_set_master(mite->pcidev);

	/* BAR 0 holds the mite chip's own register window */
	mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
	if (!mite->mite_io_addr) {
		dev_err(dev->class_dev,
			"Failed to remap mite io memory address\n");
		return -ENOMEM;
	}
	mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);

	/* BAR 1 holds the daq board registers, exposed via dev->mmio */
	dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
	if (!dev->mmio) {
		dev_err(dev->class_dev,
			"Failed to remap daq io memory address\n");
		return -ENOMEM;
	}
	mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
	length = pci_resource_len(mite->pcidev, 1);

	if (use_win1) {
		/* disable window 0 and point window 1 at the daq registers */
		writel(0, mite->mite_io_addr + MITE_IODWBSR);
		dev_info(dev->class_dev,
			 "using I/O Window Base Size register 1\n");
		writel(mite->daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mite_io_addr + MITE_IODWBSR_1);
		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
	} else {
		writel(mite->daq_phys_addr | WENAB,
		       mite->mite_io_addr + MITE_IODWBSR);
	}
	/*
	 * Make sure dma bursts work. I got this from running a bus analyzer
	 * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
	 * of 0x61f and bursts worked. 6281 powered up with register value of
	 * 0x1f and bursts didn't work. The NI windows driver reads the
	 * register, then does a bitwise-or of 0x600 with it and writes it back.
	 */
	unknown_dma_burst_bits =
	    readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits,
	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

	/* the chip signature reports how many dma channels exist */
	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
	mite->num_channels = mite_csigr_dmac(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		dev_warn(dev->class_dev,
			 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
			 mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}
	dump_chip_signature(csigr_bits);
	for (i = 0; i < mite->num_channels; i++) {
		/* reset the channel */
		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
		/* disable interrupts */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(i));
	}
	mite->fifo_size = mite_fifo_size(mite, 0);
	dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
	return 0;
}
EXPORT_SYMBOL_GPL(mite_setup2);
168
169 void mite_detach(struct mite_struct *mite)
170 {
171         if (!mite)
172                 return;
173
174         if (mite->mite_io_addr)
175                 iounmap(mite->mite_io_addr);
176
177         kfree(mite);
178 }
179 EXPORT_SYMBOL_GPL(mite_detach);
180
181 struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
182 {
183         struct mite_dma_descriptor_ring *ring =
184             kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
185
186         if (!ring)
187                 return NULL;
188         ring->hw_dev = get_device(&mite->pcidev->dev);
189         if (!ring->hw_dev) {
190                 kfree(ring);
191                 return NULL;
192         }
193         ring->n_links = 0;
194         ring->descriptors = NULL;
195         ring->descriptors_dma_addr = 0;
196         return ring;
197 };
198 EXPORT_SYMBOL_GPL(mite_alloc_ring);
199
200 void mite_free_ring(struct mite_dma_descriptor_ring *ring)
201 {
202         if (ring) {
203                 if (ring->descriptors) {
204                         dma_free_coherent(ring->hw_dev,
205                                           ring->n_links *
206                                           sizeof(struct mite_dma_descriptor),
207                                           ring->descriptors,
208                                           ring->descriptors_dma_addr);
209                 }
210                 put_device(ring->hw_dev);
211                 kfree(ring);
212         }
213 };
214 EXPORT_SYMBOL_GPL(mite_free_ring);
215
216 struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
217                                                    struct
218                                                    mite_dma_descriptor_ring
219                                                    *ring, unsigned min_channel,
220                                                    unsigned max_channel)
221 {
222         int i;
223         unsigned long flags;
224         struct mite_channel *channel = NULL;
225
226         /*
227          * spin lock so mite_release_channel can be called safely
228          * from interrupts
229          */
230         spin_lock_irqsave(&mite->lock, flags);
231         for (i = min_channel; i <= max_channel; ++i) {
232                 if (mite->channel_allocated[i] == 0) {
233                         mite->channel_allocated[i] = 1;
234                         channel = &mite->channels[i];
235                         channel->ring = ring;
236                         break;
237                 }
238         }
239         spin_unlock_irqrestore(&mite->lock, flags);
240         return channel;
241 }
242 EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
243
/*
 * mite_release_channel() - return a channel claimed with
 * mite_request_channel_in_range().
 *
 * Disarms and resets the channel, disables all of its interrupts, clears
 * its ring pointer and marks it free.  Calling it on an already-released
 * channel is a no-op.  The mite lock makes this safe against concurrent
 * mite_request_channel_in_range(), including from interrupt context.
 */
void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;

	/* spin lock to prevent races with mite_request_channel */
	spin_lock_irqsave(&mite->lock, flags);
	if (mite->channel_allocated[mite_chan->channel]) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/*
		 * disable all channel's interrupts (do it after disarm/reset so
		 * MITE_CHCR reg isn't changed while dma is still active!)
		 */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
		mite->channel_allocated[mite_chan->channel] = 0;
		mite_chan->ring = NULL;
		mmiowb();
	}
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);
270
271 void mite_dma_arm(struct mite_channel *mite_chan)
272 {
273         struct mite_struct *mite = mite_chan->mite;
274         int chor;
275         unsigned long flags;
276
277         /*
278          * memory barrier is intended to insure any twiddling with the buffer
279          * is done before writing to the mite to arm dma transfer
280          */
281         smp_mb();
282         /* arm */
283         chor = CHOR_START;
284         spin_lock_irqsave(&mite->lock, flags);
285         mite_chan->done = 0;
286         writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
287         mmiowb();
288         spin_unlock_irqrestore(&mite->lock, flags);
289         /* mite_dma_tcr(mite, channel); */
290 }
291 EXPORT_SYMBOL_GPL(mite_dma_arm);
292
293 /**************************************/
294
295 int mite_buf_change(struct mite_dma_descriptor_ring *ring,
296                     struct comedi_subdevice *s)
297 {
298         struct comedi_async *async = s->async;
299         unsigned int n_links;
300
301         if (ring->descriptors) {
302                 dma_free_coherent(ring->hw_dev,
303                                   ring->n_links *
304                                   sizeof(struct mite_dma_descriptor),
305                                   ring->descriptors,
306                                   ring->descriptors_dma_addr);
307         }
308         ring->descriptors = NULL;
309         ring->descriptors_dma_addr = 0;
310         ring->n_links = 0;
311
312         if (async->prealloc_bufsz == 0)
313                 return 0;
314
315         n_links = async->prealloc_bufsz >> PAGE_SHIFT;
316
317         ring->descriptors =
318             dma_alloc_coherent(ring->hw_dev,
319                                n_links * sizeof(struct mite_dma_descriptor),
320                                &ring->descriptors_dma_addr, GFP_KERNEL);
321         if (!ring->descriptors) {
322                 dev_err(s->device->class_dev,
323                         "mite: ring buffer allocation failed\n");
324                 return -ENOMEM;
325         }
326         ring->n_links = n_links;
327
328         return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
329 }
330 EXPORT_SYMBOL_GPL(mite_buf_change);
331
/*
 * initializes the ring buffer descriptors to provide correct DMA transfer links
 * to the exact amount of memory required.  When the ring buffer is allocated in
 * mite_buf_change, the default is to initialize the ring to refer to the entire
 * DMA data buffer.  A command may call this function later to re-initialize and
 * shorten the amount of memory that will be transferred.
 *
 * @ring: descriptor ring previously sized by mite_buf_change().
 * @s: comedi subdevice whose preallocated buffer pages back the links.
 * @nbytes: number of buffer bytes the dma chain should cover.
 *
 * Returns 0 on success, or -ENOMEM if @nbytes needs more links than the
 * ring has.  The chain is circular: the last link points back at the
 * first descriptor.
 */
int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
			       struct comedi_subdevice *s,
			       unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int n_full_links = nbytes >> PAGE_SHIFT;
	unsigned int remainder = nbytes % PAGE_SIZE;
	int i;

	dev_dbg(s->device->class_dev,
		"mite: init ring buffer to %u bytes\n", nbytes);

	/* a partial final page still needs a link of its own */
	if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
		dev_err(s->device->class_dev,
			"mite: ring buffer too small for requested init\n");
		return -ENOMEM;
	}

	/* We set the descriptors for all full links. */
	for (i = 0; i < n_full_links; ++i) {
		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
		ring->descriptors[i].addr =
		    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
		ring->descriptors[i].next =
		    cpu_to_le32(ring->descriptors_dma_addr +
				(i + 1) * sizeof(struct mite_dma_descriptor));
	}

	/* the last link is either a remainder or was a full link. */
	if (remainder > 0) {
		/* set the lesser count for the remainder link */
		ring->descriptors[i].count = cpu_to_le32(remainder);
		ring->descriptors[i].addr =
		    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
		/* increment i so that assignment below refs last link */
		++i;
	}

	/* Assign the last link->next to point back to the head of the list. */
	ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);

	/*
	 * barrier is meant to insure that all the writes to the dma descriptors
	 * have completed before the dma controller is commanded to read them
	 */
	smp_wmb();
	return 0;
}
EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
388
/*
 * mite_prep_dma() - configure a dma channel for a transfer.
 * @mite_chan: the channel, already bound to a descriptor ring and with
 *             mite_chan->dir set to the transfer direction.
 * @num_device_bits: device-side transfer width in bits (8, 16 or 32).
 * @num_memory_bits: memory-side transfer width in bits (8, 16 or 32).
 *
 * Resets the channel and its fifo, then programs the channel control,
 * memory, device, device-address and link registers, and finally points
 * the link-chain start address at the descriptor ring.  Call
 * mite_dma_arm() afterwards to start the transfer.
 */
void mite_prep_dma(struct mite_channel *mite_chan,
		   unsigned int num_device_bits, unsigned int num_memory_bits)
{
	unsigned int chor, chcr, mcr, dcr, lkcr;
	struct mite_struct *mite = mite_chan->mite;

	/* reset DMA and FIFO */
	chor = CHOR_DMARESET | CHOR_FRESET;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));

	/* short link chaining mode */
	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
	    CHCR_BURSTEN;
	/*
	 * Link Complete Interrupt: interrupt every time a link
	 * in MITE_RING is completed. This can generate a lot of
	 * extra interrupts, but right now we update the values
	 * of buf_int_ptr and buf_int_count at each interrupt. A
	 * better method is to poll the MITE before each user
	 * "read()" to calculate the number of bytes available.
	 */
	chcr |= CHCR_SET_LC_IE;
	if (num_memory_bits == 32 && num_device_bits == 16) {
		/*
		 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
		 * samples into the fifo in the right order. Tested doing 32 bit
		 * memory to 16 bit device transfers to the analog out of a
		 * pxi-6281, which has mite version = 1, type = 4. This also
		 * works for dma reads from the counters on e-series boards.
		 */
		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
	}
	if (mite_chan->dir == COMEDI_INPUT)
		chcr |= CHCR_DEV_TO_MEM;

	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));

	/* to/from memory */
	mcr = CR_RL(64) | CR_ASEQUP;
	switch (num_memory_bits) {
	case 8:
		mcr |= CR_PSIZE8;
		break;
	case 16:
		mcr |= CR_PSIZE16;
		break;
	case 32:
		mcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid mem bit width for dma transfer\n");
		break;
	}
	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));

	/* from/to device */
	dcr = CR_RL(64) | CR_ASEQUP;
	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
	switch (num_device_bits) {
	case 8:
		dcr |= CR_PSIZE8;
		break;
	case 16:
		dcr |= CR_PSIZE16;
		break;
	case 32:
		dcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid dev bit width for dma transfer\n");
		break;
	}
	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));

	/* reset the DAR */
	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));

	/* the link is 32bits */
	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));

	/* starting address for link chaining */
	writel(mite_chan->ring->descriptors_dma_addr,
	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
475
476 static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
477 {
478         struct mite_struct *mite = mite_chan->mite;
479
480         return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
481 }
482
483 u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
484 {
485         struct mite_struct *mite = mite_chan->mite;
486
487         return readl(mite->mite_io_addr +
488                      MITE_FCR(mite_chan->channel)) & 0x000000FF;
489 }
490 EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
491
492 /* returns lower bound for number of bytes transferred from device to memory */
493 u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
494 {
495         u32 device_byte_count;
496
497         device_byte_count = mite_device_bytes_transferred(mite_chan);
498         return device_byte_count - mite_bytes_in_transit(mite_chan);
499 }
500 EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
501
502 /* returns upper bound for number of bytes transferred from device to memory */
503 u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
504 {
505         u32 in_transit_count;
506
507         in_transit_count = mite_bytes_in_transit(mite_chan);
508         return mite_device_bytes_transferred(mite_chan) - in_transit_count;
509 }
510 EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
511
512 /* returns lower bound for number of bytes read from memory to device */
513 u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
514 {
515         u32 device_byte_count;
516
517         device_byte_count = mite_device_bytes_transferred(mite_chan);
518         return device_byte_count + mite_bytes_in_transit(mite_chan);
519 }
520 EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
521
522 /* returns upper bound for number of bytes read from memory to device */
523 u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
524 {
525         u32 in_transit_count;
526
527         in_transit_count = mite_bytes_in_transit(mite_chan);
528         return mite_device_bytes_transferred(mite_chan) + in_transit_count;
529 }
530 EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
531
532 unsigned mite_dma_tcr(struct mite_channel *mite_chan)
533 {
534         struct mite_struct *mite = mite_chan->mite;
535
536         return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
537 }
538 EXPORT_SYMBOL_GPL(mite_dma_tcr);
539
540 void mite_dma_disarm(struct mite_channel *mite_chan)
541 {
542         struct mite_struct *mite = mite_chan->mite;
543         unsigned chor;
544
545         /* disarm */
546         chor = CHOR_ABORT;
547         writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
548 }
549 EXPORT_SYMBOL_GPL(mite_dma_disarm);
550
/*
 * mite_sync_input_dma() - account for data the mite has dma'd from the
 * device into the comedi buffer.
 *
 * Returns 0 on success, or -1 on dma overrun of unallocated buffer
 * space (COMEDI_CB_OVERFLOW is also set in async->events).
 */
int mite_sync_input_dma(struct mite_channel *mite_chan,
			struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	int count;
	unsigned int nbytes, old_alloc_count;

	old_alloc_count = async->buf_write_alloc_count;
	/* write alloc as much as we can */
	comedi_buf_write_alloc(s, async->prealloc_bufsz);

	nbytes = mite_bytes_written_to_memory_lb(mite_chan);
	/*
	 * The (int) cast of the difference makes the comparison robust
	 * against 32-bit counter wraparound: it's positive only if the
	 * upper-bound byte count passed the write-alloc pointer, i.e. dma
	 * wrote into buffer space that was never allocated for writing.
	 */
	if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
		  old_alloc_count) > 0) {
		dev_warn(s->device->class_dev,
			 "mite: DMA overwrite of free area\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return -1;
	}

	count = nbytes - async->buf_write_count;
	/*
	 * it's possible count will be negative due to conservative value
	 * returned by mite_bytes_written_to_memory_lb
	 */
	if (count <= 0)
		return 0;

	comedi_buf_write_free(s, count);
	comedi_inc_scan_progress(s, count);
	async->events |= COMEDI_CB_BLOCK;
	return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_input_dma);
585
586 int mite_sync_output_dma(struct mite_channel *mite_chan,
587                          struct comedi_subdevice *s)
588 {
589         struct comedi_async *async = s->async;
590         struct comedi_cmd *cmd = &async->cmd;
591         u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
592         unsigned int old_alloc_count = async->buf_read_alloc_count;
593         u32 nbytes_ub, nbytes_lb;
594         int count;
595         bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
596
597         /* read alloc as much as we can */
598         comedi_buf_read_alloc(s, async->prealloc_bufsz);
599         nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
600         if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
601                 nbytes_lb = stop_count;
602         nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
603         if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
604                 nbytes_ub = stop_count;
605
606         if ((!finite_regen || stop_count > old_alloc_count) &&
607             ((int)(nbytes_ub - old_alloc_count) > 0)) {
608                 dev_warn(s->device->class_dev, "mite: DMA underrun\n");
609                 async->events |= COMEDI_CB_OVERFLOW;
610                 return -1;
611         }
612
613         if (finite_regen) {
614                 /*
615                  * This is a special case where we continuously output a finite
616                  * buffer.  In this case, we do not free any of the memory,
617                  * hence we expect that old_alloc_count will reach a maximum of
618                  * stop_count bytes.
619                  */
620                 return 0;
621         }
622
623         count = nbytes_lb - async->buf_read_count;
624         if (count <= 0)
625                 return 0;
626
627         if (count) {
628                 comedi_buf_read_free(s, count);
629                 async->events |= COMEDI_CB_BLOCK;
630         }
631         return 0;
632 }
633 EXPORT_SYMBOL_GPL(mite_sync_output_dma);
634
/*
 * mite_get_status() - read and return a channel's status register.
 *
 * If the DONE bit is set, the cached mite_chan->done flag is set and the
 * done condition is acknowledged via CHOR_CLRDONE.  Done under the mite
 * lock so the cached flag stays consistent with the hardware.
 */
unsigned mite_get_status(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned status;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		/* acknowledge the done condition */
		writel(CHOR_CLRDONE,
		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	}
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(mite_get_status);
653
/*
 * mite_done() - test whether a channel's dma transfer has completed.
 *
 * Refreshes the cached done flag via mite_get_status() (which also
 * acknowledges the hardware done condition), then returns the flag,
 * read under the mite lock.
 */
int mite_done(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;
	int done;

	mite_get_status(mite_chan);
	spin_lock_irqsave(&mite->lock, flags);
	done = mite_chan->done;
	spin_unlock_irqrestore(&mite->lock, flags);
	return done;
}
EXPORT_SYMBOL_GPL(mite_done);
667
/* Nothing to do at load time; this module is a helper library. */
static int __init mite_module_init(void)
{
	return 0;
}
672
/* Nothing to do at unload time; no global state is held. */
static void __exit mite_module_exit(void)
{
}
676
677 module_init(mite_module_init);
678 module_exit(mite_module_exit);
679
680 MODULE_AUTHOR("Comedi http://www.comedi.org");
681 MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
682 MODULE_LICENSE("GPL");