/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);
struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;
static DEFINE_SPINLOCK(lm_lock);

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;
dma_addr_t crcsr_bus;
struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
struct mutex vme_irq;	/* Locking for VME irq callback configuration */
static char driver_name[] = "vme_ca91cx42";
static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};
static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}
static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;

	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}
static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;
	void (*call)(int, int, void *);
	void *priv_data;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			call = ca91cx42_bridge->irq[i - 1].callback[vec].func;
			priv_data =
				ca91cx42_bridge->irq[i - 1].callback[vec].priv_data;

			if (call != NULL)
				call(i, vec, priv_data);
			else
				printk(KERN_WARNING "Spurious VME interrupt, "
					"level:%x, vector:%x\n", i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	if (dev_id != ca91cx42_bridge->base)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, bridge->base);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox, SW IACK and bus error interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, ca91cx42_bridge->base);
}
/*
 * Set up a VME interrupt
 */
int ca91cx42_request_irq(int level, int statid,
	void (*callback)(int level, int vector, void *priv_data),
	void *priv_data)
{
	u32 tmp;

	mutex_lock(&(vme_irq));

	if (ca91cx42_bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&(vme_irq));
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	ca91cx42_bridge->irq[level - 1].count++;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	ca91cx42_bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_VIRQ[level];
	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	mutex_unlock(&(vme_irq));

	return 0;
}
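/*
 * Example usage (illustrative only - "my_handler" and the level/vector
 * values below are hypothetical, not part of this driver). The callback
 * runs from the bridge interrupt handler, so it must not sleep:
 *
 *	static void my_handler(int level, int statid, void *priv_data)
 *	{
 *		schedule_work(priv_data);
 *	}
 *
 *	if (ca91cx42_request_irq(3, 0x10, my_handler, &my_work))
 *		printk(KERN_ERR "level 3, vector 0x10 already taken\n");
 */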
void ca91cx42_free_irq(int level, int statid)
{
	u32 tmp;
	struct pci_dev *pdev;

	mutex_lock(&(vme_irq));

	ca91cx42_bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (ca91cx42_bridge->irq[level - 1].count == 0) {
		tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
		tmp &= ~CA91CX42_LINT_VIRQ[level];
		iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}

	ca91cx42_bridge->irq[level - 1].callback[statid].func = NULL;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(vme_irq));
}
int ca91cx42_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
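/*
 * Example usage (illustrative only, values hypothetical): interrupt a
 * remote VME master on level 3. The status/ID vector must be even, as
 * checked above:
 *
 *	ca91cx42_generate_irq(3, 0xAA);
 */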
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	/* Windows 0 and 4 have a 4 kB granularity, all others 64 kB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
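/*
 * Example usage (illustrative only): expose 64 kB of PCI memory at A24
 * address 0x10000 for user and supervisor data cycles. The "image" and
 * "bus_addr" values are assumed to come from the caller:
 *
 *	ca91cx42_slave_set(image, 1, 0x10000, 0x10000, bus_addr,
 *		VME_A24, VME_SCT | VME_USER | VME_SUPER | VME_DATA);
 */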
int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 * needing to redo the mapping...
	 */
	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus-dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource "
			"name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));

	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
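/*
 * Example usage (illustrative only): map an enabled 1 MB master window
 * onto A32 address 0x40000000 for D32 single-cycle data accesses. The
 * "image" pointer is assumed to come from the caller:
 *
 *	ca91cx42_master_set(image, 1, 0x40000000, 0x100000,
 *		VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
 */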
int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}
int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}
ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
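/*
 * Example usage (illustrative only): copy 256 bytes out of a configured
 * master window, starting 0x100 bytes into it. "image" and "buf" are
 * assumed to come from the caller:
 *
 *	ssize_t done = ca91cx42_master_read(image, buf, 256, 0x100);
 */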
int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);

	return (int)slot;
}

static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */

/* Slot number override; -1 means use the value latched by the hardware
 * (assumed to be set as a module parameter elsewhere in the driver)
 */
static int vme_slotnum = -1;

static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

	/* XXX We may need to set this somehow as the Universe II does not
	 * support geographical addressing.
	 */
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);

	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot owns a 512 kB window into CR/CSR space */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}
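/*
 * Worked example of the offset calculation above: a board in slot 2
 * owns the window at 2 * 512 kB = 0x100000 in CR/CSR space, so VCSR_TO
 * is programmed with (crcsr_bus - 0x100000), translating accesses to
 * that window onto the local CR/CSR image buffer.
 */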
static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct list_head *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_irq));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
	ca91cx42_bridge->request_irq = ca91cx42_request_irq;
	ca91cx42_bridge->free_irq = ca91cx42_free_irq;
	ca91cx42_bridge->generate_irq = ca91cx42_generate_irq;
#if 0
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
		goto err_crcsr;
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;

	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}
void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* Turn off Ints */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, n, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);

/*----------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/
#define SWIZZLE(X) (((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | \
		    ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
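/*
 * SWIZZLE reverses the byte order of a 32-bit value, converting between
 * big-endian VMEbus data and little-endian PCI data. Worked example:
 * SWIZZLE(0x12345678) == 0x78563412.
 */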
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;

	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}
	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
			    (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
			    (int *)(out_image_va[i] +
				    (vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}
	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {
		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {
			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;
		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}
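/*
 * Example usage (illustrative only, values hypothetical): atomically set
 * bit 0 of the 32-bit word at A24 address 0x4000 if it is currently
 * clear, retrying up to 10 times. Field names are those of vmeRmwCfg_t
 * used above:
 *
 *	vmeRmwCfg_t rmw = {0};
 *	rmw.targetAddr = 0x4000;
 *	rmw.addrSpace = VME_A24;
 *	rmw.enableMask = 1;
 *	rmw.compareData = 0;
 *	rmw.swapData = 1;
 *	rmw.maxAttempts = 10;
 *	ca91cx42_master_rmw(&rmw);
 */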
int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	}

	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if 0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA  = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV  = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}
	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
				    __get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
				  &currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		dcppreg &= ~6;
		if (dcppreg & 1) {
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* All descriptor pages freed */
	return 0;
}
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	if (channel > 0) {
		return -EINVAL;
	}
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0) {
			return -EINVAL;
		}
		if (curDma->byteCount >= 0x1000000) {
			return -EINVAL;
		}
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
			return -EINVAL;
		}
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME) {
				return -EINVAL;
			}
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI) {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
			return -EINVAL;
		}

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma) {	/* Endless Loop! */
			return -EINVAL;
		}
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0) {
		dgcsreg = 0x8000000;
	} else {
		dgcsreg = 0;
	}

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		dgcsreg |= (x << 16);
	}

	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	/* Start the DMA */
	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg,
				(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
	}

	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}
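/*
 * Example usage (illustrative only, values hypothetical): copy 4 kB
 * from PCI memory at "buf_bus_addr" into A32 VME space with a
 * single-packet chain. Field names are those of vmeDmaPacket_t used
 * above:
 *
 *	vmeDmaPacket_t dma = {0};
 *	dma.channel_number = 0;
 *	dma.srcBus = VME_DMA_PCI;
 *	dma.srcAddr = buf_bus_addr;
 *	dma.dstBus = VME_DMA_VME;
 *	dma.dstAddr = 0x20000000;
 *	dma.dstVmeAttr.addrSpace = VME_A32;
 *	dma.dstVmeAttr.maxDataWidth = VME_D32;
 *	dma.dstVmeAttr.xferProtocol = VME_SCT;
 *	dma.byteCount = 0x1000;
 *	ca91cx42_do_dma(&dma);
 */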
int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
	int temp_ctl = 0;

	switch (vmeLm->addrSpace) {
	default:
		return -EINVAL;
	case VME_A16:
		temp_ctl |= 0x00000;
		break;
	case VME_A24:
		temp_ctl |= 0x10000;
		break;
	case VME_A32:
		temp_ctl |= 0x20000;
		break;
	case VME_CRCSR:
		temp_ctl |= 0x50000;
		break;
	case VME_USER1:
		temp_ctl |= 0x60000;
		break;
	case VME_USER2:
		temp_ctl |= 0x70000;
		break;
	}

	/* Disable while we are mucking around */
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

	/* Setup CTL register. */
	if (vmeLm->userAccessType & VME_SUPER)
		temp_ctl |= 0x00200000;
	if (vmeLm->userAccessType & VME_USER)
		temp_ctl |= 0x00100000;
	if (vmeLm->dataAccessType & VME_PROG)
		temp_ctl |= 0x00800000;
	if (vmeLm->dataAccessType & VME_DATA)
		temp_ctl |= 0x00400000;

	/* Write ctl reg and enable */
	iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
	temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
	unsigned long flags;

	spin_lock_irqsave(&lm_lock, flags);
	spin_unlock_irqrestore(&lm_lock, flags);

	if (vmeLm->lmWait < 10)
		vmeLm->lmWait = 10;
	interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);

	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
	temp_ctl &= 0x00FFFFFF;

	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		vbto = 7;
	} else if (vmeArb->globalTimeoutTimer > 1024) {
		return -EINVAL;
	} else if (vmeArb->globalTimeoutTimer == 0) {
		vbto = 0;
	} else {
		vbto = 1;
		while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
			vbto += 1;
	}
	temp_ctl |= (vbto << 28);

	if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
		temp_ctl |= 1 << 26;

	if (vmeArb->arbiterTimeoutFlag)
		temp_ctl |= 2 << 24;

	iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);

	return 0;
}
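/*
 * Worked example of the timeout encoding above: globalTimeoutTimer = 100
 * gives vbto = 4, since 16 * 2^(4 - 1) = 128 is the first value in the
 * 16/32/64/128/... series that covers 100. Values above 1024 do not fit
 * in the VBTO field and are rejected.
 */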
int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

	vbto = (temp_ctl >> 28) & 0xF;
	if (vbto != 0)
		vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

	if (temp_ctl & (1 << 26))
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	else
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;

	if (temp_ctl & (3 << 24))
		vmeArb->arbiterTimeoutFlag = 1;

	return 0;
}
int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
	temp_ctl &= 0xFF0FFFFF;

	if (vmeReq->releaseMode == 1)
		temp_ctl |= (1 << 20);

	if (vmeReq->fairMode == 1)
		temp_ctl |= (1 << 21);

	temp_ctl |= (vmeReq->requestLevel << 22);

	iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);

	return 0;
}
int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

	if (temp_ctl & (1 << 20))
		vmeReq->releaseMode = 1;

	if (temp_ctl & (1 << 21))
		vmeReq->fairMode = 1;

	vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

	return 0;
}