]> git.karo-electronics.de Git - mv-sheeva.git/blob - drivers/staging/vme/bridges/vme_ca91cx42.c
Merge branch 'fix/hda' into topic/hda
[mv-sheeva.git] / drivers / staging / vme / bridges / vme_ca91cx42.c
1 /*
2  * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3  *
4  * Author: Martyn Welch <martyn.welch@gefanuc.com>
5  * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * Derived from ca91c042.c by Michael Wyrick
11  *
12  * This program is free software; you can redistribute  it and/or modify it
13  * under  the terms of  the GNU General  Public License as published by the
14  * Free Software Foundation;  either version 2 of the  License, or (at your
15  * option) any later version.
16  */
17
18 #include <linux/version.h>
19 #include <linux/module.h>
20 #include <linux/mm.h>
21 #include <linux/types.h>
22 #include <linux/errno.h>
23 #include <linux/pci.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/poll.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_ca91cx42.h"
35
/* Forward declarations for the PCI driver entry points defined below. */
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Single bridge instance supported; set up in ca91cx42_probe(). */
struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;	/* Woken on DMA-complete interrupt */
wait_queue_head_t iack_queue;	/* Woken on software IACK interrupt */
wait_queue_head_t lm_queue;	/* Location-monitor wait queue */
wait_queue_head_t mbox_queue;	/* Woken on any mailbox interrupt */

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;		/* Kernel virtual address of CR/CSR image */
dma_addr_t crcsr_bus;		/* Bus address of CR/CSR image */

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
struct mutex vme_irq;	/* Locking for VME irq callback configuration */
57
58
59
/* Name used for both the PCI driver and the requested IRQ line. */
static char driver_name[] = "vme_ca91cx42";

/* Only the Tundra Universe I/II (CA91C142) is handled by this driver. */
static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
73
/*
 * DMA-complete interrupt: wake any task sleeping on dma_queue and
 * report the DMA status bit as serviced.
 */
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}
80
81 static u32 ca91cx42_LM_irqhandler(u32 stat)
82 {
83         int i;
84         u32 serviced = 0;
85
86         for (i = 0; i < 4; i++) {
87                 if (stat & CA91CX42_LINT_LM[i]) {
88                         /* We only enable interrupts if the callback is set */
89                         lm_callback[i](i);
90                         serviced |= CA91CX42_LINT_LM[i];
91                 }
92         }
93
94         return serviced;
95 }
96
/* XXX This needs to be split into 4 queues */
/*
 * Mailbox interrupt: wakes every waiter on the single shared mailbox
 * queue.  The mbox_mask argument (raw mailbox status bits) is currently
 * unused because all four mailboxes share one wait queue.
 */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
104
/*
 * Software IACK interrupt: wakes the task in ca91cx42_generate_irq()
 * waiting for its interrupt to be acknowledged on the VME bus.
 */
static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
111
#if 0
/*
 * Dead code (compiled out): check whether the S_TA (signalled target
 * abort) flag is set in the bridge's PCI_COMMAND register, optionally
 * clearing it (the bit is write-one-to-clear).  Returns 1 if the flag
 * was set, 0 otherwise.
 */
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;
	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
			       ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
#endif
126
/*
 * VME bus error (VERR) interrupt handler.
 *
 * Reads the DMA General Control/Status register and logs an error if
 * bit 0x00000800 is clear -- presumably the DMA "done" status bit in
 * DGCS; TODO confirm against the Universe II register map.
 */
static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}
140
/*
 * PCI (local) bus error (LERR) interrupt handler.
 *
 * NOTE(review): body is identical to ca91cx42_VERR_irqhandler() above,
 * including the "DMA Read Error" message text -- this looks like a
 * copy/paste artefact; confirm whether LERR should inspect a different
 * status source.
 */
static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);

	}

	return CA91CX42_LINT_LERR;
}
155
156
157 static u32 ca91cx42_VIRQ_irqhandler(int stat)
158 {
159         int vec, i, serviced = 0;
160         void (*call)(int, int, void *);
161         void *priv_data;
162
163         for (i = 7; i > 0; i--) {
164                 if (stat & (1 << i)) {
165                         vec = ioread32(ca91cx42_bridge->base +
166                                 CA91CX42_V_STATID[i]) & 0xff;
167
168                         call = ca91cx42_bridge->irq[i - 1].callback[vec].func;
169                         priv_data =
170                         ca91cx42_bridge->irq[i - 1].callback[vec].priv_data;
171
172                         if (call != NULL)
173                                 call(i, vec, priv_data);
174                         else
175                                 printk("Spurilous VME interrupt, level:%x, "
176                                         "vector:%x\n", i, vec);
177
178                         serviced |= (1 << i);
179                 }
180         }
181
182         return serviced;
183 }
184
185 static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
186 {
187         u32 stat, enable, serviced = 0;
188
189         if (dev_id != ca91cx42_bridge->base)
190                 return IRQ_NONE;
191
192         enable = ioread32(ca91cx42_bridge->base + LINT_EN);
193         stat = ioread32(ca91cx42_bridge->base + LINT_STAT);
194
195         /* Only look at unmasked interrupts */
196         stat &= enable;
197
198         if (unlikely(!stat))
199                 return IRQ_NONE;
200
201         if (stat & CA91CX42_LINT_DMA)
202                 serviced |= ca91cx42_DMA_irqhandler();
203         if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
204                         CA91CX42_LINT_LM3))
205                 serviced |= ca91cx42_LM_irqhandler(stat);
206         if (stat & CA91CX42_LINT_MBOX)
207                 serviced |= ca91cx42_MB_irqhandler(stat);
208         if (stat & CA91CX42_LINT_SW_IACK)
209                 serviced |= ca91cx42_IACK_irqhandler();
210         if (stat & CA91CX42_LINT_VERR)
211                 serviced |= ca91cx42_VERR_irqhandler();
212         if (stat & CA91CX42_LINT_LERR)
213                 serviced |= ca91cx42_LERR_irqhandler();
214         if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
215                         CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
216                         CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
217                         CA91CX42_LINT_VIRQ7))
218                 serviced |= ca91cx42_VIRQ_irqhandler(stat);
219
220         /* Clear serviced interrupts */
221         iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);
222
223         return IRQ_HANDLED;
224 }
225
/*
 * Initialise interrupt handling for the bridge.
 *
 * Masks and clears all interrupt sources, installs the shared PCI
 * interrupt handler (dev_id = pdev), routes all bridge interrupts to
 * PCI interrupt line 0 and finally enables the mailbox, IACK, VERR,
 * LERR and DMA sources.  VME bus (VIRQ) levels are enabled later, as
 * callbacks are registered via ca91cx42_request_irq().
 *
 * Returns 0 on success or the negative error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable mailbox, IACK, VERR, LERR and DMA interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
267
/*
 * Tear down interrupt handling: mask and clear all interrupt sources,
 * then release the PCI interrupt line (dev_id must match the pdev
 * passed to request_irq()).
 */
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, pdev);
}
280
281 /*
282  * Set up an VME interrupt
283  */
284 int ca91cx42_request_irq(int level, int statid,
285         void (*callback)(int level, int vector, void *priv_data),
286         void *priv_data)
287 {
288         u32 tmp;
289
290         mutex_lock(&(vme_irq));
291
292         if (ca91cx42_bridge->irq[level - 1].callback[statid].func) {
293                 mutex_unlock(&(vme_irq));
294                 printk("VME Interrupt already taken\n");
295                 return -EBUSY;
296         }
297
298
299         ca91cx42_bridge->irq[level - 1].count++;
300         ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
301         ca91cx42_bridge->irq[level - 1].callback[statid].func = callback;
302
303         /* Enable IRQ level */
304         tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
305         tmp |= CA91CX42_LINT_VIRQ[level];
306         iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);
307
308         mutex_unlock(&(vme_irq));
309
310         return 0;
311 }
312
313 /*
314  * Free VME interrupt
315  */
/*
 * Free VME interrupt
 *
 * Unregisters the callback for (level, statid).  When the last callback
 * at this level is removed, the VIRQ level is masked in LINT_EN and we
 * synchronise with the PCI interrupt so no handler can still be running
 * with the old callback.
 *
 * NOTE(review): level/statid are not range-checked and count can
 * underflow if free is called without a matching request -- callers are
 * trusted; confirm against the vme core's usage.
 */
void ca91cx42_free_irq(int level, int statid)
{
	u32 tmp;
	struct pci_dev *pdev;

	mutex_lock(&(vme_irq));

	ca91cx42_bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (ca91cx42_bridge->irq[level - 1].count == 0) {
		tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
		tmp &= ~CA91CX42_LINT_VIRQ[level];
		iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}

	ca91cx42_bridge->irq[level - 1].callback[statid].func = NULL;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(vme_irq));
}
342
/*
 * Generate a VME interrupt at @level with Status/ID vector @statid.
 *
 * Serialised by vme_int: the Universe has a single software interrupt
 * generator.  The Status/ID is written, the IRQ line asserted via
 * VINT_EN, and the caller sleeps until the interrupt is acknowledged
 * (the IACK interrupt handler wakes iack_queue), after which the line
 * is deasserted.
 *
 * NOTE(review): the wait condition is the constant 0, so
 * wait_event_interruptible() only returns on a signal, not on the
 * wake_up() from the IACK handler -- confirm intended behaviour.
 *
 * Returns 0 on success, -EINVAL for an odd statid (the Universe can
 * only generate even vectors).
 */
int ca91cx42_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
374
375 int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
376         unsigned long long vme_base, unsigned long long size,
377         dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
378 {
379         unsigned int i, addr = 0, granularity = 0;
380         unsigned int temp_ctl = 0;
381         unsigned int vme_bound, pci_offset;
382
383         i = image->number;
384
385         switch (aspace) {
386         case VME_A16:
387                 addr |= CA91CX42_VSI_CTL_VAS_A16;
388                 break;
389         case VME_A24:
390                 addr |= CA91CX42_VSI_CTL_VAS_A24;
391                 break;
392         case VME_A32:
393                 addr |= CA91CX42_VSI_CTL_VAS_A32;
394                 break;
395         case VME_USER1:
396                 addr |= CA91CX42_VSI_CTL_VAS_USER1;
397                 break;
398         case VME_USER2:
399                 addr |= CA91CX42_VSI_CTL_VAS_USER2;
400                 break;
401         case VME_A64:
402         case VME_CRCSR:
403         case VME_USER3:
404         case VME_USER4:
405         default:
406                 printk(KERN_ERR "Invalid address space\n");
407                 return -EINVAL;
408                 break;
409         }
410
411         /*
412          * Bound address is a valid address for the window, adjust
413          * accordingly
414          */
415         vme_bound = vme_base + size - granularity;
416         pci_offset = pci_base - vme_base;
417
418         /* XXX Need to check that vme_base, vme_bound and pci_offset aren't
419          * too big for registers
420          */
421
422         if ((i == 0) || (i == 4))
423                 granularity = 0x1000;
424         else
425                 granularity = 0x10000;
426
427         if (vme_base & (granularity - 1)) {
428                 printk(KERN_ERR "Invalid VME base alignment\n");
429                 return -EINVAL;
430         }
431         if (vme_bound & (granularity - 1)) {
432                 printk(KERN_ERR "Invalid VME bound alignment\n");
433                 return -EINVAL;
434         }
435         if (pci_offset & (granularity - 1)) {
436                 printk(KERN_ERR "Invalid PCI Offset alignment\n");
437                 return -EINVAL;
438         }
439
440         /* Disable while we are mucking around */
441         temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
442         temp_ctl &= ~CA91CX42_VSI_CTL_EN;
443         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
444
445         /* Setup mapping */
446         iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
447         iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
448         iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
449
450 /* XXX Prefetch stuff currently unsupported */
451 #if 0
452         if (vmeIn->wrPostEnable)
453                 temp_ctl |= CA91CX42_VSI_CTL_PWEN;
454         if (vmeIn->prefetchEnable)
455                 temp_ctl |= CA91CX42_VSI_CTL_PREN;
456         if (vmeIn->rmwLock)
457                 temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
458         if (vmeIn->data64BitCapable)
459                 temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
460 #endif
461
462         /* Setup address space */
463         temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
464         temp_ctl |= addr;
465
466         /* Setup cycle types */
467         temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
468         if (cycle & VME_SUPER)
469                 temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
470         if (cycle & VME_USER)
471                 temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
472         if (cycle & VME_PROG)
473                 temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
474         if (cycle & VME_DATA)
475                 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
476
477         /* Write ctl reg without enable */
478         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
479
480         if (enabled)
481                 temp_ctl |= CA91CX42_VSI_CTL_EN;
482
483         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
484
485         return 0;
486 }
487
488 int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
489         unsigned long long *vme_base, unsigned long long *size,
490         dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
491 {
492         unsigned int i, granularity = 0, ctl = 0;
493         unsigned long long vme_bound, pci_offset;
494
495         i = image->number;
496
497         if ((i == 0) || (i == 4))
498                 granularity = 0x1000;
499         else
500                 granularity = 0x10000;
501
502         /* Read Registers */
503         ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
504
505         *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
506         vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
507         pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
508
509         *pci_base = (dma_addr_t)vme_base + pci_offset;
510         *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
511
512         *enabled = 0;
513         *aspace = 0;
514         *cycle = 0;
515
516         if (ctl & CA91CX42_VSI_CTL_EN)
517                 *enabled = 1;
518
519         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
520                 *aspace = VME_A16;
521         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
522                 *aspace = VME_A24;
523         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
524                 *aspace = VME_A32;
525         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
526                 *aspace = VME_USER1;
527         if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
528                 *aspace = VME_USER2;
529
530         if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
531                 *cycle |= VME_SUPER;
532         if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
533                 *cycle |= VME_USER;
534         if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
535                 *cycle |= VME_PROG;
536         if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
537                 *cycle |= VME_DATA;
538
539         return 0;
540 }
541
542 /*
543  * Allocate and map PCI Resource
544  */
545 static int ca91cx42_alloc_resource(struct vme_master_resource *image,
546         unsigned long long size)
547 {
548         unsigned long long existing_size;
549         int retval = 0;
550         struct pci_dev *pdev;
551
552         /* Find pci_dev container of dev */
553         if (ca91cx42_bridge->parent == NULL) {
554                 printk(KERN_ERR "Dev entry NULL\n");
555                 return -EINVAL;
556         }
557         pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
558
559         existing_size = (unsigned long long)(image->pci_resource.end -
560                 image->pci_resource.start);
561
562         /* If the existing size is OK, return */
563         if (existing_size == (size - 1))
564                 return 0;
565
566         if (existing_size != 0) {
567                 iounmap(image->kern_base);
568                 image->kern_base = NULL;
569                 if (image->pci_resource.name != NULL)
570                         kfree(image->pci_resource.name);
571                 release_resource(&(image->pci_resource));
572                 memset(&(image->pci_resource), 0, sizeof(struct resource));
573         }
574
575         if (image->pci_resource.name == NULL) {
576                 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
577                 if (image->pci_resource.name == NULL) {
578                         printk(KERN_ERR "Unable to allocate memory for resource"
579                                 " name\n");
580                         retval = -ENOMEM;
581                         goto err_name;
582                 }
583         }
584
585         sprintf((char *)image->pci_resource.name, "%s.%d",
586                 ca91cx42_bridge->name, image->number);
587
588         image->pci_resource.start = 0;
589         image->pci_resource.end = (unsigned long)size;
590         image->pci_resource.flags = IORESOURCE_MEM;
591
592         retval = pci_bus_alloc_resource(pdev->bus,
593                 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
594                 0, NULL, NULL);
595         if (retval) {
596                 printk(KERN_ERR "Failed to allocate mem resource for "
597                         "window %d size 0x%lx start 0x%lx\n",
598                         image->number, (unsigned long)size,
599                         (unsigned long)image->pci_resource.start);
600                 goto err_resource;
601         }
602
603         image->kern_base = ioremap_nocache(
604                 image->pci_resource.start, size);
605         if (image->kern_base == NULL) {
606                 printk(KERN_ERR "Failed to remap resource\n");
607                 retval = -ENOMEM;
608                 goto err_remap;
609         }
610
611         return 0;
612
613         iounmap(image->kern_base);
614         image->kern_base = NULL;
615 err_remap:
616         release_resource(&(image->pci_resource));
617 err_resource:
618         kfree(image->pci_resource.name);
619         memset(&(image->pci_resource), 0, sizeof(struct resource));
620 err_name:
621         return retval;
622 }
623
/*
 * Free and unmap PCI Resource
 */
/*
 * Unmap the kernel mapping, release the PCI resource and free its name.
 * Order matters: the mapping must go before the resource is released.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
635
636
637 int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
638         unsigned long long vme_base, unsigned long long size,
639         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
640 {
641         int retval = 0;
642         unsigned int i;
643         unsigned int temp_ctl = 0;
644         unsigned long long pci_bound, vme_offset, pci_base;
645
646         /* Verify input data */
647         if (vme_base & 0xFFF) {
648                 printk(KERN_ERR "Invalid VME Window alignment\n");
649                 retval = -EINVAL;
650                 goto err_window;
651         }
652         if (size & 0xFFF) {
653                 printk(KERN_ERR "Invalid VME Window alignment\n");
654                 retval = -EINVAL;
655                 goto err_window;
656         }
657
658         spin_lock(&(image->lock));
659
660         /* XXX We should do this much later, so that we can exit without
661          *     needing to redo the mapping...
662          */
663         /*
664          * Let's allocate the resource here rather than further up the stack as
665          * it avoids pushing loads of bus dependant stuff up the stack
666          */
667         retval = ca91cx42_alloc_resource(image, size);
668         if (retval) {
669                 spin_unlock(&(image->lock));
670                 printk(KERN_ERR "Unable to allocate memory for resource "
671                         "name\n");
672                 retval = -ENOMEM;
673                 goto err_res;
674         }
675
676         pci_base = (unsigned long long)image->pci_resource.start;
677
678         /*
679          * Bound address is a valid address for the window, adjust
680          * according to window granularity.
681          */
682         pci_bound = pci_base + (size - 0x1000);
683         vme_offset = vme_base - pci_base;
684
685         i = image->number;
686
687         /* Disable while we are mucking around */
688         temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
689         temp_ctl &= ~CA91CX42_LSI_CTL_EN;
690         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
691
692 /* XXX Prefetch stuff currently unsupported */
693 #if 0
694         if (vmeOut->wrPostEnable)
695                 temp_ctl |= 0x40000000;
696 #endif
697
698         /* Setup cycle types */
699         temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
700         if (cycle & VME_BLT)
701                 temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
702         if (cycle & VME_MBLT)
703                 temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
704
705         /* Setup data width */
706         temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
707         switch (dwidth) {
708         case VME_D8:
709                 temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
710                 break;
711         case VME_D16:
712                 temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
713                 break;
714         case VME_D32:
715                 temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
716                 break;
717         case VME_D64:
718                 temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
719                 break;
720         default:
721                 spin_unlock(&(image->lock));
722                 printk(KERN_ERR "Invalid data width\n");
723                 retval = -EINVAL;
724                 goto err_dwidth;
725                 break;
726         }
727
728         /* Setup address space */
729         temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
730         switch (aspace) {
731         case VME_A16:
732                 temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
733                 break;
734         case VME_A24:
735                 temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
736                 break;
737         case VME_A32:
738                 temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
739                 break;
740         case VME_CRCSR:
741                 temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
742                 break;
743         case VME_USER1:
744                 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
745                 break;
746         case VME_USER2:
747                 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
748                 break;
749         case VME_A64:
750         case VME_USER3:
751         case VME_USER4:
752         default:
753                 spin_unlock(&(image->lock));
754                 printk(KERN_ERR "Invalid address space\n");
755                 retval = -EINVAL;
756                 goto err_aspace;
757                 break;
758         }
759
760         temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
761         if (cycle & VME_SUPER)
762                 temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
763         if (cycle & VME_PROG)
764                 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
765
766         /* Setup mapping */
767         iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
768         iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
769         iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
770
771         /* Write ctl reg without enable */
772         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
773
774         if (enabled)
775                 temp_ctl |= CA91CX42_LSI_CTL_EN;
776
777         iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
778
779         spin_unlock(&(image->lock));
780         return 0;
781
782 err_aspace:
783 err_dwidth:
784         ca91cx42_free_resource(image);
785 err_res:
786 err_window:
787         return retval;
788 }
789
790 int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
791         unsigned long long *vme_base, unsigned long long *size,
792         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
793 {
794         unsigned int i, ctl;
795         unsigned long long pci_base, pci_bound, vme_offset;
796
797         i = image->number;
798
799         ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
800
801         pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
802         vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
803         pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
804
805         *vme_base = pci_base + vme_offset;
806         *size = (pci_bound - pci_base) + 0x1000;
807
808         *enabled = 0;
809         *aspace = 0;
810         *cycle = 0;
811         *dwidth = 0;
812
813         if (ctl & CA91CX42_LSI_CTL_EN)
814                 *enabled = 1;
815
816         /* Setup address space */
817         switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
818         case CA91CX42_LSI_CTL_VAS_A16:
819                 *aspace = VME_A16;
820                 break;
821         case CA91CX42_LSI_CTL_VAS_A24:
822                 *aspace = VME_A24;
823                 break;
824         case CA91CX42_LSI_CTL_VAS_A32:
825                 *aspace = VME_A32;
826                 break;
827         case CA91CX42_LSI_CTL_VAS_CRCSR:
828                 *aspace = VME_CRCSR;
829                 break;
830         case CA91CX42_LSI_CTL_VAS_USER1:
831                 *aspace = VME_USER1;
832                 break;
833         case CA91CX42_LSI_CTL_VAS_USER2:
834                 *aspace = VME_USER2;
835                 break;
836         }
837
838         /* XXX Not sure howto check for MBLT */
839         /* Setup cycle types */
840         if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
841                 *cycle |= VME_BLT;
842         else
843                 *cycle |= VME_SCT;
844
845         if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
846                 *cycle |= VME_SUPER;
847         else
848                 *cycle |= VME_USER;
849
850         if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
851                 *cycle = VME_PROG;
852         else
853                 *cycle = VME_DATA;
854
855         /* Setup data width */
856         switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
857         case CA91CX42_LSI_CTL_VDW_D8:
858                 *dwidth = VME_D8;
859                 break;
860         case CA91CX42_LSI_CTL_VDW_D16:
861                 *dwidth = VME_D16;
862                 break;
863         case CA91CX42_LSI_CTL_VDW_D32:
864                 *dwidth = VME_D32;
865                 break;
866         case CA91CX42_LSI_CTL_VDW_D64:
867                 *dwidth = VME_D64;
868                 break;
869         }
870
871 /* XXX Prefetch stuff currently unsupported */
872 #if 0
873         if (ctl & 0x40000000)
874                 vmeOut->wrPostEnable = 1;
875 #endif
876
877         return 0;
878 }
879
880 int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
881         unsigned long long *vme_base, unsigned long long *size,
882         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
883 {
884         int retval;
885
886         spin_lock(&(image->lock));
887
888         retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
889                 cycle, dwidth);
890
891         spin_unlock(&(image->lock));
892
893         return retval;
894 }
895
896 ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
897         size_t count, loff_t offset)
898 {
899         int retval;
900
901         spin_lock(&(image->lock));
902
903         memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
904         retval = count;
905
906         spin_unlock(&(image->lock));
907
908         return retval;
909 }
910
911 ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
912         size_t count, loff_t offset)
913 {
914         int retval = 0;
915
916         spin_lock(&(image->lock));
917
918         memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
919         retval = count;
920
921         spin_unlock(&(image->lock));
922
923         return retval;
924 }
925
926 int ca91cx42_slot_get(void)
927 {
928         u32 slot = 0;
929
930         slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
931         slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
932         return (int)slot;
933
934 }
935
/* Module entry point: register the PCI driver with the PCI core. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
940
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
949 static int ca91cx42_crcsr_init(struct pci_dev *pdev)
950 {
951         unsigned int crcsr_addr;
952         int tmp, slot;
953
954 /* XXX We may need to set this somehow as the Universe II does not support
955  *     geographical addressing.
956  */
957 #if 0
958         if (vme_slotnum != -1)
959                 iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
960 #endif
961         slot = ca91cx42_slot_get();
962         dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
963         if (slot == 0) {
964                 dev_err(&pdev->dev, "Slot number is unset, not configuring "
965                         "CR/CSR space\n");
966                 return -EINVAL;
967         }
968
969         /* Allocate mem for CR/CSR image */
970         crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
971                 &crcsr_bus);
972         if (crcsr_kernel == NULL) {
973                 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
974                         "image\n");
975                 return -ENOMEM;
976         }
977
978         memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
979
980         crcsr_addr = slot * (512 * 1024);
981         iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);
982
983         tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
984         tmp |= CA91CX42_VCSR_CTL_EN;
985         iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
986
987         return 0;
988 }
989
990 static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
991 {
992         u32 tmp;
993
994         /* Turn off CR/CSR space */
995         tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
996         tmp &= ~CA91CX42_VCSR_CTL_EN;
997         iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
998
999         /* Free image */
1000         iowrite32(0, ca91cx42_bridge->base + VCSR_TO);
1001
1002         pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
1003 }
1004
1005 static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1006 {
1007         int retval, i;
1008         u32 data;
1009         struct list_head *pos = NULL;
1010         struct vme_master_resource *master_image;
1011         struct vme_slave_resource *slave_image;
1012 #if 0
1013         struct vme_dma_resource *dma_ctrlr;
1014 #endif
1015         struct vme_lm_resource *lm;
1016
1017         /* We want to support more than one of each bridge so we need to
1018          * dynamically allocate the bridge structure
1019          */
1020         ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);
1021
1022         if (ca91cx42_bridge == NULL) {
1023                 dev_err(&pdev->dev, "Failed to allocate memory for device "
1024                         "structure\n");
1025                 retval = -ENOMEM;
1026                 goto err_struct;
1027         }
1028
1029         memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
1030
1031         /* Enable the device */
1032         retval = pci_enable_device(pdev);
1033         if (retval) {
1034                 dev_err(&pdev->dev, "Unable to enable device\n");
1035                 goto err_enable;
1036         }
1037
1038         /* Map Registers */
1039         retval = pci_request_regions(pdev, driver_name);
1040         if (retval) {
1041                 dev_err(&pdev->dev, "Unable to reserve resources\n");
1042                 goto err_resource;
1043         }
1044
1045         /* map registers in BAR 0 */
1046         ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
1047                 4096);
1048         if (!ca91cx42_bridge->base) {
1049                 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1050                 retval = -EIO;
1051                 goto err_remap;
1052         }
1053
1054         /* Check to see if the mapping worked out */
1055         data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1056         if (data != PCI_VENDOR_ID_TUNDRA) {
1057                 dev_err(&pdev->dev, "PCI_ID check failed\n");
1058                 retval = -EIO;
1059                 goto err_test;
1060         }
1061
1062         /* Initialize wait queues & mutual exclusion flags */
1063         /* XXX These need to be moved to the vme_bridge structure */
1064         init_waitqueue_head(&dma_queue);
1065         init_waitqueue_head(&iack_queue);
1066         mutex_init(&(vme_int));
1067         mutex_init(&(vme_irq));
1068         mutex_init(&(vme_rmw));
1069
1070         ca91cx42_bridge->parent = &(pdev->dev);
1071         strcpy(ca91cx42_bridge->name, driver_name);
1072
1073         /* Setup IRQ */
1074         retval = ca91cx42_irq_init(ca91cx42_bridge);
1075         if (retval != 0) {
1076                 dev_err(&pdev->dev, "Chip Initialization failed.\n");
1077                 goto err_irq;
1078         }
1079
1080         /* Add master windows to list */
1081         INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
1082         for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1083                 master_image = kmalloc(sizeof(struct vme_master_resource),
1084                         GFP_KERNEL);
1085                 if (master_image == NULL) {
1086                         dev_err(&pdev->dev, "Failed to allocate memory for "
1087                         "master resource structure\n");
1088                         retval = -ENOMEM;
1089                         goto err_master;
1090                 }
1091                 master_image->parent = ca91cx42_bridge;
1092                 spin_lock_init(&(master_image->lock));
1093                 master_image->locked = 0;
1094                 master_image->number = i;
1095                 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1096                         VME_CRCSR | VME_USER1 | VME_USER2;
1097                 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1098                         VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1099                 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1100                 memset(&(master_image->pci_resource), 0,
1101                         sizeof(struct resource));
1102                 master_image->kern_base  = NULL;
1103                 list_add_tail(&(master_image->list),
1104                         &(ca91cx42_bridge->master_resources));
1105         }
1106
1107         /* Add slave windows to list */
1108         INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
1109         for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1110                 slave_image = kmalloc(sizeof(struct vme_slave_resource),
1111                         GFP_KERNEL);
1112                 if (slave_image == NULL) {
1113                         dev_err(&pdev->dev, "Failed to allocate memory for "
1114                         "slave resource structure\n");
1115                         retval = -ENOMEM;
1116                         goto err_slave;
1117                 }
1118                 slave_image->parent = ca91cx42_bridge;
1119                 mutex_init(&(slave_image->mtx));
1120                 slave_image->locked = 0;
1121                 slave_image->number = i;
1122                 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1123                         VME_USER2;
1124
1125                 /* Only windows 0 and 4 support A16 */
1126                 if (i == 0 || i == 4)
1127                         slave_image->address_attr |= VME_A16;
1128
1129                 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1130                         VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1131                 list_add_tail(&(slave_image->list),
1132                         &(ca91cx42_bridge->slave_resources));
1133         }
1134 #if 0
1135         /* Add dma engines to list */
1136         INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
1137         for (i = 0; i < CA91C142_MAX_DMA; i++) {
1138                 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1139                         GFP_KERNEL);
1140                 if (dma_ctrlr == NULL) {
1141                         dev_err(&pdev->dev, "Failed to allocate memory for "
1142                         "dma resource structure\n");
1143                         retval = -ENOMEM;
1144                         goto err_dma;
1145                 }
1146                 dma_ctrlr->parent = ca91cx42_bridge;
1147                 mutex_init(&(dma_ctrlr->mtx));
1148                 dma_ctrlr->locked = 0;
1149                 dma_ctrlr->number = i;
1150                 INIT_LIST_HEAD(&(dma_ctrlr->pending));
1151                 INIT_LIST_HEAD(&(dma_ctrlr->running));
1152                 list_add_tail(&(dma_ctrlr->list),
1153                         &(ca91cx42_bridge->dma_resources));
1154         }
1155 #endif
1156         /* Add location monitor to list */
1157         INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
1158         lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1159         if (lm == NULL) {
1160                 dev_err(&pdev->dev, "Failed to allocate memory for "
1161                 "location monitor resource structure\n");
1162                 retval = -ENOMEM;
1163                 goto err_lm;
1164         }
1165         lm->parent = ca91cx42_bridge;
1166         mutex_init(&(lm->mtx));
1167         lm->locked = 0;
1168         lm->number = 1;
1169         lm->monitors = 4;
1170         list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
1171
1172         ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1173         ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1174         ca91cx42_bridge->master_get = ca91cx42_master_get;
1175         ca91cx42_bridge->master_set = ca91cx42_master_set;
1176         ca91cx42_bridge->master_read = ca91cx42_master_read;
1177         ca91cx42_bridge->master_write = ca91cx42_master_write;
1178 #if 0
1179         ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1180         ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1181         ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1182         ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1183 #endif
1184         ca91cx42_bridge->request_irq = ca91cx42_request_irq;
1185         ca91cx42_bridge->free_irq = ca91cx42_free_irq;
1186         ca91cx42_bridge->generate_irq = ca91cx42_generate_irq;
1187 #if 0
1188         ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1189         ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1190         ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1191         ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1192 #endif
1193         ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1194
1195         data = ioread32(ca91cx42_bridge->base + MISC_CTL);
1196         dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1197                 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1198         dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());
1199
1200         if (ca91cx42_crcsr_init(pdev)) {
1201                 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1202                 retval = -EINVAL;
1203 #if 0
1204                 goto err_crcsr;
1205 #endif
1206         }
1207
1208         /* Need to save ca91cx42_bridge pointer locally in link list for use in
1209          * ca91cx42_remove()
1210          */
1211         retval = vme_register_bridge(ca91cx42_bridge);
1212         if (retval != 0) {
1213                 dev_err(&pdev->dev, "Chip Registration failed.\n");
1214                 goto err_reg;
1215         }
1216
1217         return 0;
1218
1219         vme_unregister_bridge(ca91cx42_bridge);
1220 err_reg:
1221         ca91cx42_crcsr_exit(pdev);
1222 err_crcsr:
1223 err_lm:
1224         /* resources are stored in link list */
1225         list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1226                 lm = list_entry(pos, struct vme_lm_resource, list);
1227                 list_del(pos);
1228                 kfree(lm);
1229         }
1230 #if 0
1231 err_dma:
1232         /* resources are stored in link list */
1233         list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1234                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1235                 list_del(pos);
1236                 kfree(dma_ctrlr);
1237         }
1238 #endif
1239 err_slave:
1240         /* resources are stored in link list */
1241         list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1242                 slave_image = list_entry(pos, struct vme_slave_resource, list);
1243                 list_del(pos);
1244                 kfree(slave_image);
1245         }
1246 err_master:
1247         /* resources are stored in link list */
1248         list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1249                 master_image = list_entry(pos, struct vme_master_resource,
1250                         list);
1251                 list_del(pos);
1252                 kfree(master_image);
1253         }
1254
1255         ca91cx42_irq_exit(pdev);
1256 err_irq:
1257 err_test:
1258         iounmap(ca91cx42_bridge->base);
1259 err_remap:
1260         pci_release_regions(pdev);
1261 err_resource:
1262         pci_disable_device(pdev);
1263 err_enable:
1264         kfree(ca91cx42_bridge);
1265 err_struct:
1266         return retval;
1267
1268 }
1269
1270 void ca91cx42_remove(struct pci_dev *pdev)
1271 {
1272         struct list_head *pos = NULL;
1273         struct vme_master_resource *master_image;
1274         struct vme_slave_resource *slave_image;
1275         struct vme_dma_resource *dma_ctrlr;
1276         struct vme_lm_resource *lm;
1277         int i;
1278
1279         /* Turn off Ints */
1280         iowrite32(0, ca91cx42_bridge->base + LINT_EN);
1281
1282         /* Turn off the windows */
1283         iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
1284         iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
1285         iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
1286         iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
1287         iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
1288         iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
1289         iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
1290         iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
1291         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
1292         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
1293         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
1294         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
1295         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
1296         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
1297         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
1298         iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);
1299
1300         vme_unregister_bridge(ca91cx42_bridge);
1301 #if 0
1302         ca91cx42_crcsr_exit(pdev);
1303 #endif
1304         /* resources are stored in link list */
1305         list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1306                 lm = list_entry(pos, struct vme_lm_resource, list);
1307                 list_del(pos);
1308                 kfree(lm);
1309         }
1310
1311         /* resources are stored in link list */
1312         list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1313                 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1314                 list_del(pos);
1315                 kfree(dma_ctrlr);
1316         }
1317
1318         /* resources are stored in link list */
1319         list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1320                 slave_image = list_entry(pos, struct vme_slave_resource, list);
1321                 list_del(pos);
1322                 kfree(slave_image);
1323         }
1324
1325         /* resources are stored in link list */
1326         list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1327                 master_image = list_entry(pos, struct vme_master_resource,
1328                         list);
1329                 list_del(pos);
1330                 kfree(master_image);
1331         }
1332
1333         ca91cx42_irq_exit(pdev);
1334
1335         iounmap(ca91cx42_bridge->base);
1336
1337         pci_release_regions(pdev);
1338
1339         pci_disable_device(pdev);
1340
1341         kfree(ca91cx42_bridge);
1342 }
1343
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1348
1349 MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1350 MODULE_LICENSE("GPL");
1351
1352 module_init(ca91cx42_init);
1353 module_exit(ca91cx42_exit);
1354
1355 /*----------------------------------------------------------------------------
1356  * STAGING
1357  *--------------------------------------------------------------------------*/
1358
1359 #if 0
1360 #define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >>  8) | ((X & 0x0000FF00) <<  8) | ((X & 0x000000FF) << 24))
1361
/*
 * Perform a VME read-modify-write cycle via the Universe's special cycle
 * generator (SCYC_* registers).
 *
 * NOTE(review): dead code (inside #if 0) retained from the older
 * ca91c042 driver.  It stores bus addresses in plain ints and casts
 * them to pointers, so it is not 64-bit clean as written; it would
 * need rework before being revived.
 */
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;
	/* Must try at least once; only 32-bit VME addresses supported
	 * (upper address word must be zero). */
	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}
	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		/* Skip windows that are not enabled */
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		/* Target must fall within the window's VME range */
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
			    (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
			    (int *)(out_image_va[i] +
				    (vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}
	/* Setup the RMW registers. */
	/* SWIZZLE byte-swaps the values before they hit the registers --
	 * presumably to match VME byte ordering; verify against the
	 * Universe II manual before reviving this code. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {

		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {

			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;

		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}
1443
/*
 * Build the DMA control (DCTL) register value from the VME attributes of
 * a DMA packet and return it through @dctlregreturn.
 *
 * NOTE(review): dead code (inside #if 0).  Bit values are written as
 * magic numbers; they presumably match the Universe II DCTL layout --
 * verify against the datasheet before reviving.
 */
int uniSetupDctlReg(vmeDmaPacket_t * vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	/* Direction bit: 0 = VME->PCI, 0x80000000 = PCI->VME; the VME-side
	 * attributes drive the rest of the register */
	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	/* Data width field */
	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	/* Address space field */
	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
		break;
	}
	/* Program/data, supervisor/user and transfer-protocol bits */
	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
1506
/*
 * Kick off a DMA transfer on the Universe, either chained (linked-list
 * descriptors via DCPP) or direct (registers loaded from the first
 * descriptor).  Returns a timebase snapshot (get_tbl()) taken just
 * before the transfer is started.
 *
 * NOTE(review): dead code (inside #if 0); casts a virtual descriptor
 * pointer to unsigned int for DCPP, so not 64-bit clean.
 */
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		/* Chained mode: point DCPP at the descriptor list */
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if	0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA  = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV  = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}
1542
/*
 * Translate a chain of vmeDmaPacket_t requests into a linked list of
 * hardware TDMA_Cmd_Packet descriptors, allocating pages as needed and
 * converting the next-descriptor links to PCI bus addresses at the end.
 * Returns the head of the descriptor list, or NULL on allocation
 * failure.
 *
 * NOTE(review): dead code (inside #if 0).  Descriptor links are kept in
 * unsigned int fields (not 64-bit clean), and the sentinel value 1 in
 * dcpp marks the end of the chain (see ca91cx42_free_dma()).  Page
 * allocations in the middle of the chain are not checked for failure.
 */
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t * vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}
	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			/* Page full: chain a freshly allocated page */
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
				    __get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		/* dva is always the VME side, dlv the PCI side */
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			/* 1 is the end-of-chain sentinel for the hardware */
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
			       &currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
1613
/*
 * Free a descriptor list built by ca91cx42_setup_dma(): first convert
 * the PCI bus-address links back to virtual addresses, then release
 * each underlying page exactly once.  Always returns 0.
 *
 * NOTE(review): dead code (inside #if 0); relies on the dcpp sentinel
 * value 1 and on descriptors being page-contiguous, mirroring the
 * layout created by ca91cx42_setup_dma().
 */
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		dcppreg &= ~6;	/* mask the hardware control bits */
		if (dcppreg & 1) {
			/* sentinel: end of chain */
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		/* A non-adjacent next descriptor marks a page boundary */
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return 0;
}
1649
1650 int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
1651 {
1652         unsigned int dgcsreg = 0;
1653         unsigned int dctlreg = 0;
1654         int val;
1655         int channel, x;
1656         vmeDmaPacket_t *curDma;
1657         TDMA_Cmd_Packet *dmaLL;
1658
1659         /* Sanity check the VME chain. */
1660         channel = vmeDma->channel_number;
1661         if (channel > 0) {
1662                 return -EINVAL;
1663         }
1664         curDma = vmeDma;
1665         while (curDma != 0) {
1666                 if (curDma->byteCount == 0) {
1667                         return -EINVAL;
1668                 }
1669                 if (curDma->byteCount >= 0x1000000) {
1670                         return -EINVAL;
1671                 }
1672                 if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
1673                         return -EINVAL;
1674                 }
1675                 switch (curDma->srcBus) {
1676                 case VME_DMA_PCI:
1677                         if (curDma->dstBus != VME_DMA_VME) {
1678                                 return -EINVAL;
1679                         }
1680                         break;
1681                 case VME_DMA_VME:
1682                         if (curDma->dstBus != VME_DMA_PCI) {
1683                                 return -EINVAL;
1684                         }
1685                         break;
1686                 default:
1687                         return -EINVAL;
1688                         break;
1689                 }
1690                 if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
1691                         return -EINVAL;
1692                 }
1693
1694                 curDma = curDma->pNextPacket;
1695                 if (curDma == vmeDma) { /* Endless Loop! */
1696                         return -EINVAL;
1697                 }
1698         }
1699
1700         /* calculate control register */
1701         if (vmeDma->pNextPacket != 0) {
1702                 dgcsreg = 0x8000000;
1703         } else {
1704                 dgcsreg = 0;
1705         }
1706
1707         for (x = 0; x < 8; x++) {       /* vme block size */
1708                 if ((256 << x) >= vmeDma->maxVmeBlockSize) {
1709                         break;
1710                 }
1711         }
1712         if (x == 8)
1713                 x = 7;
1714         dgcsreg |= (x << 20);
1715
1716         if (vmeDma->vmeBackOffTimer) {
1717                 for (x = 1; x < 8; x++) {       /* vme timer */
1718                         if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1719                                 break;
1720                         }
1721                 }
1722                 if (x == 8)
1723                         x = 7;
1724                 dgcsreg |= (x << 16);
1725         }
1726         /*` Setup the dma chain */
1727         dmaLL = ca91cx42_setup_dma(vmeDma);
1728
1729         /* Start the DMA */
1730         if (dgcsreg & 0x8000000) {
1731                 vmeDma->vmeDmaStartTick =
1732                     ca91cx42_start_dma(channel, dgcsreg,
1733                                   (TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
1734         } else {
1735                 vmeDma->vmeDmaStartTick =
1736                     ca91cx42_start_dma(channel, dgcsreg, dmaLL);
1737         }
1738
1739         wait_event_interruptible(dma_queue,
1740                 ioread32(ca91cx42_bridge->base + DGCS) & 0x800);
1741
1742         val = ioread32(ca91cx42_bridge->base + DGCS);
1743         iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);
1744
1745         vmeDma->vmeDmaStatus = 0;
1746
1747         if (!(val & 0x00000800)) {
1748                 vmeDma->vmeDmaStatus = val & 0x700;
1749                 printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
1750                         " DGCS=%08X\n", val);
1751                 val = ioread32(ca91cx42_bridge->base + DCPP);
1752                 printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
1753                 val = ioread32(ca91cx42_bridge->base + DCTL);
1754                 printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
1755                 val = ioread32(ca91cx42_bridge->base + DTBC);
1756                 printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
1757                 val = ioread32(ca91cx42_bridge->base + DLA);
1758                 printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
1759                 val = ioread32(ca91cx42_bridge->base + DVA);
1760                 printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
1761
1762         }
1763         /* Free the dma chain */
1764         ca91cx42_free_dma(dmaLL);
1765
1766         return 0;
1767 }
1768
1769 int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
1770 {
1771         int temp_ctl = 0;
1772
1773         if (vmeLm->addrU)
1774                 return -EINVAL;
1775
1776         switch (vmeLm->addrSpace) {
1777         case VME_A64:
1778         case VME_USER3:
1779         case VME_USER4:
1780                 return -EINVAL;
1781         case VME_A16:
1782                 temp_ctl |= 0x00000;
1783                 break;
1784         case VME_A24:
1785                 temp_ctl |= 0x10000;
1786                 break;
1787         case VME_A32:
1788                 temp_ctl |= 0x20000;
1789                 break;
1790         case VME_CRCSR:
1791                 temp_ctl |= 0x50000;
1792                 break;
1793         case VME_USER1:
1794                 temp_ctl |= 0x60000;
1795                 break;
1796         case VME_USER2:
1797                 temp_ctl |= 0x70000;
1798                 break;
1799         }
1800
1801         /* Disable while we are mucking around */
1802         iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1803
1804         iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);
1805
1806         /* Setup CTL register. */
1807         if (vmeLm->userAccessType & VME_SUPER)
1808                 temp_ctl |= 0x00200000;
1809         if (vmeLm->userAccessType & VME_USER)
1810                 temp_ctl |= 0x00100000;
1811         if (vmeLm->dataAccessType & VME_PROG)
1812                 temp_ctl |= 0x00800000;
1813         if (vmeLm->dataAccessType & VME_DATA)
1814                 temp_ctl |= 0x00400000;
1815
1816
1817         /* Write ctl reg and enable */
1818         iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
1819         temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);
1820
1821         return 0;
1822 }
1823
1824 int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
1825 {
1826         unsigned long flags;
1827         unsigned int tmp;
1828
1829         spin_lock_irqsave(&lm_lock, flags);
1830         spin_unlock_irqrestore(&lm_lock, flags);
1831         if (tmp == 0) {
1832                 if (vmeLm->lmWait < 10)
1833                         vmeLm->lmWait = 10;
1834                 interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
1835         }
1836         iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1837
1838         return 0;
1839 }
1840
1841
1842
1843 int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1844 {
1845         int temp_ctl = 0;
1846         int vbto = 0;
1847
1848         temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1849         temp_ctl &= 0x00FFFFFF;
1850
1851         if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
1852                 vbto = 7;
1853         } else if (vmeArb->globalTimeoutTimer > 1024) {
1854                 return -EINVAL;
1855         } else if (vmeArb->globalTimeoutTimer == 0) {
1856                 vbto = 0;
1857         } else {
1858                 vbto = 1;
1859                 while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
1860                         vbto += 1;
1861         }
1862         temp_ctl |= (vbto << 28);
1863
1864         if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
1865                 temp_ctl |= 1 << 26;
1866
1867         if (vmeArb->arbiterTimeoutFlag)
1868                 temp_ctl |= 2 << 24;
1869
1870         iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
1871         return 0;
1872 }
1873
1874 int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
1875 {
1876         int temp_ctl = 0;
1877         int vbto = 0;
1878
1879         temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1880
1881         vbto = (temp_ctl >> 28) & 0xF;
1882         if (vbto != 0)
1883                 vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));
1884
1885         if (temp_ctl & (1 << 26))
1886                 vmeArb->arbiterMode = VME_PRIORITY_MODE;
1887         else
1888                 vmeArb->arbiterMode = VME_R_ROBIN_MODE;
1889
1890         if (temp_ctl & (3 << 24))
1891                 vmeArb->arbiterTimeoutFlag = 1;
1892
1893         return 0;
1894 }
1895
1896 int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1897 {
1898         int temp_ctl = 0;
1899
1900         temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1901         temp_ctl &= 0xFF0FFFFF;
1902
1903         if (vmeReq->releaseMode == 1)
1904                 temp_ctl |= (1 << 20);
1905
1906         if (vmeReq->fairMode == 1)
1907                 temp_ctl |= (1 << 21);
1908
1909         temp_ctl |= (vmeReq->requestLevel << 22);
1910
1911         iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
1912         return 0;
1913 }
1914
1915 int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
1916 {
1917         int temp_ctl = 0;
1918
1919         temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1920
1921         if (temp_ctl & (1 << 20))
1922                 vmeReq->releaseMode = 1;
1923
1924         if (temp_ctl & (1 << 21))
1925                 vmeReq->fairMode = 1;
1926
1927         vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;
1928
1929         return 0;
1930 }
1931
1932
1933 #endif