/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
16 #include <linux/init.h>
17 #include <linux/export.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/vme.h>
35 #include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

/* Forward declaration; definition lives at the bottom of the file. */
static int __init vme_init(void);
44 static struct vme_dev *dev_to_vme_dev(struct device *dev)
46 return container_of(dev, struct vme_dev, dev);
50 * Find the bridge that the resource is associated with.
52 static struct vme_bridge *find_bridge(struct vme_resource *resource)
54 /* Get list to search */
55 switch (resource->type) {
57 return list_entry(resource->entry, struct vme_master_resource,
61 return list_entry(resource->entry, struct vme_slave_resource,
65 return list_entry(resource->entry, struct vme_dma_resource,
69 return list_entry(resource->entry, struct vme_lm_resource,
73 printk(KERN_ERR "Unknown resource type\n");
80 * Allocate a contiguous block of memory for use by the driver. This is used to
81 * create the buffers for the slave windows.
83 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
86 struct vme_bridge *bridge;
88 if (resource == NULL) {
89 printk(KERN_ERR "No resource\n");
93 bridge = find_bridge(resource);
95 printk(KERN_ERR "Can't find bridge\n");
99 if (bridge->parent == NULL) {
100 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
104 if (bridge->alloc_consistent == NULL) {
105 printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
110 return bridge->alloc_consistent(bridge->parent, size, dma);
112 EXPORT_SYMBOL(vme_alloc_consistent);
115 * Free previously allocated contiguous block of memory.
117 void vme_free_consistent(struct vme_resource *resource, size_t size,
118 void *vaddr, dma_addr_t dma)
120 struct vme_bridge *bridge;
122 if (resource == NULL) {
123 printk(KERN_ERR "No resource\n");
127 bridge = find_bridge(resource);
128 if (bridge == NULL) {
129 printk(KERN_ERR "Can't find bridge\n");
133 if (bridge->parent == NULL) {
134 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
138 if (bridge->free_consistent == NULL) {
139 printk(KERN_ERR "free_consistent not supported by bridge %s\n",
144 bridge->free_consistent(bridge->parent, size, vaddr, dma);
146 EXPORT_SYMBOL(vme_free_consistent);
148 size_t vme_get_size(struct vme_resource *resource)
151 unsigned long long base, size;
153 u32 aspace, cycle, dwidth;
155 switch (resource->type) {
157 retval = vme_master_get(resource, &enabled, &base, &size,
158 &aspace, &cycle, &dwidth);
163 retval = vme_slave_get(resource, &enabled, &base, &size,
164 &buf_base, &aspace, &cycle);
172 printk(KERN_ERR "Unknown resource type\n");
177 EXPORT_SYMBOL(vme_get_size);
179 int vme_check_window(u32 aspace, unsigned long long vme_base,
180 unsigned long long size)
186 if (((vme_base + size) > VME_A16_MAX) ||
187 (vme_base > VME_A16_MAX))
191 if (((vme_base + size) > VME_A24_MAX) ||
192 (vme_base > VME_A24_MAX))
196 if (((vme_base + size) > VME_A32_MAX) ||
197 (vme_base > VME_A32_MAX))
201 if ((size != 0) && (vme_base > U64_MAX + 1 - size))
205 if (((vme_base + size) > VME_CRCSR_MAX) ||
206 (vme_base > VME_CRCSR_MAX))
216 printk(KERN_ERR "Invalid address space\n");
223 EXPORT_SYMBOL(vme_check_window);
225 static u32 vme_get_aspace(int am)
259 * Request a slave image with specific attributes, return some unique
262 struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
265 struct vme_bridge *bridge;
266 struct list_head *slave_pos = NULL;
267 struct vme_slave_resource *allocated_image = NULL;
268 struct vme_slave_resource *slave_image = NULL;
269 struct vme_resource *resource = NULL;
271 bridge = vdev->bridge;
272 if (bridge == NULL) {
273 printk(KERN_ERR "Can't find VME bus\n");
277 /* Loop through slave resources */
278 list_for_each(slave_pos, &bridge->slave_resources) {
279 slave_image = list_entry(slave_pos,
280 struct vme_slave_resource, list);
282 if (slave_image == NULL) {
283 printk(KERN_ERR "Registered NULL Slave resource\n");
287 /* Find an unlocked and compatible image */
288 mutex_lock(&slave_image->mtx);
289 if (((slave_image->address_attr & address) == address) &&
290 ((slave_image->cycle_attr & cycle) == cycle) &&
291 (slave_image->locked == 0)) {
293 slave_image->locked = 1;
294 mutex_unlock(&slave_image->mtx);
295 allocated_image = slave_image;
298 mutex_unlock(&slave_image->mtx);
302 if (allocated_image == NULL)
305 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
306 if (resource == NULL) {
307 printk(KERN_WARNING "Unable to allocate resource structure\n");
310 resource->type = VME_SLAVE;
311 resource->entry = &allocated_image->list;
317 mutex_lock(&slave_image->mtx);
318 slave_image->locked = 0;
319 mutex_unlock(&slave_image->mtx);
324 EXPORT_SYMBOL(vme_slave_request);
326 int vme_slave_set(struct vme_resource *resource, int enabled,
327 unsigned long long vme_base, unsigned long long size,
328 dma_addr_t buf_base, u32 aspace, u32 cycle)
330 struct vme_bridge *bridge = find_bridge(resource);
331 struct vme_slave_resource *image;
334 if (resource->type != VME_SLAVE) {
335 printk(KERN_ERR "Not a slave resource\n");
339 image = list_entry(resource->entry, struct vme_slave_resource, list);
341 if (bridge->slave_set == NULL) {
342 printk(KERN_ERR "Function not supported\n");
346 if (!(((image->address_attr & aspace) == aspace) &&
347 ((image->cycle_attr & cycle) == cycle))) {
348 printk(KERN_ERR "Invalid attributes\n");
352 retval = vme_check_window(aspace, vme_base, size);
356 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
359 EXPORT_SYMBOL(vme_slave_set);
361 int vme_slave_get(struct vme_resource *resource, int *enabled,
362 unsigned long long *vme_base, unsigned long long *size,
363 dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
365 struct vme_bridge *bridge = find_bridge(resource);
366 struct vme_slave_resource *image;
368 if (resource->type != VME_SLAVE) {
369 printk(KERN_ERR "Not a slave resource\n");
373 image = list_entry(resource->entry, struct vme_slave_resource, list);
375 if (bridge->slave_get == NULL) {
376 printk(KERN_ERR "vme_slave_get not supported\n");
380 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
383 EXPORT_SYMBOL(vme_slave_get);
385 void vme_slave_free(struct vme_resource *resource)
387 struct vme_slave_resource *slave_image;
389 if (resource->type != VME_SLAVE) {
390 printk(KERN_ERR "Not a slave resource\n");
394 slave_image = list_entry(resource->entry, struct vme_slave_resource,
396 if (slave_image == NULL) {
397 printk(KERN_ERR "Can't find slave resource\n");
402 mutex_lock(&slave_image->mtx);
403 if (slave_image->locked == 0)
404 printk(KERN_ERR "Image is already free\n");
406 slave_image->locked = 0;
407 mutex_unlock(&slave_image->mtx);
409 /* Free up resource memory */
412 EXPORT_SYMBOL(vme_slave_free);
415 * Request a master image with specific attributes, return some unique
418 struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
419 u32 cycle, u32 dwidth)
421 struct vme_bridge *bridge;
422 struct list_head *master_pos = NULL;
423 struct vme_master_resource *allocated_image = NULL;
424 struct vme_master_resource *master_image = NULL;
425 struct vme_resource *resource = NULL;
427 bridge = vdev->bridge;
428 if (bridge == NULL) {
429 printk(KERN_ERR "Can't find VME bus\n");
433 /* Loop through master resources */
434 list_for_each(master_pos, &bridge->master_resources) {
435 master_image = list_entry(master_pos,
436 struct vme_master_resource, list);
438 if (master_image == NULL) {
439 printk(KERN_WARNING "Registered NULL master resource\n");
443 /* Find an unlocked and compatible image */
444 spin_lock(&master_image->lock);
445 if (((master_image->address_attr & address) == address) &&
446 ((master_image->cycle_attr & cycle) == cycle) &&
447 ((master_image->width_attr & dwidth) == dwidth) &&
448 (master_image->locked == 0)) {
450 master_image->locked = 1;
451 spin_unlock(&master_image->lock);
452 allocated_image = master_image;
455 spin_unlock(&master_image->lock);
458 /* Check to see if we found a resource */
459 if (allocated_image == NULL) {
460 printk(KERN_ERR "Can't find a suitable resource\n");
464 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
465 if (resource == NULL) {
466 printk(KERN_ERR "Unable to allocate resource structure\n");
469 resource->type = VME_MASTER;
470 resource->entry = &allocated_image->list;
476 spin_lock(&master_image->lock);
477 master_image->locked = 0;
478 spin_unlock(&master_image->lock);
483 EXPORT_SYMBOL(vme_master_request);
485 int vme_master_set(struct vme_resource *resource, int enabled,
486 unsigned long long vme_base, unsigned long long size, u32 aspace,
487 u32 cycle, u32 dwidth)
489 struct vme_bridge *bridge = find_bridge(resource);
490 struct vme_master_resource *image;
493 if (resource->type != VME_MASTER) {
494 printk(KERN_ERR "Not a master resource\n");
498 image = list_entry(resource->entry, struct vme_master_resource, list);
500 if (bridge->master_set == NULL) {
501 printk(KERN_WARNING "vme_master_set not supported\n");
505 if (!(((image->address_attr & aspace) == aspace) &&
506 ((image->cycle_attr & cycle) == cycle) &&
507 ((image->width_attr & dwidth) == dwidth))) {
508 printk(KERN_WARNING "Invalid attributes\n");
512 retval = vme_check_window(aspace, vme_base, size);
516 return bridge->master_set(image, enabled, vme_base, size, aspace,
519 EXPORT_SYMBOL(vme_master_set);
521 int vme_master_get(struct vme_resource *resource, int *enabled,
522 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
523 u32 *cycle, u32 *dwidth)
525 struct vme_bridge *bridge = find_bridge(resource);
526 struct vme_master_resource *image;
528 if (resource->type != VME_MASTER) {
529 printk(KERN_ERR "Not a master resource\n");
533 image = list_entry(resource->entry, struct vme_master_resource, list);
535 if (bridge->master_get == NULL) {
536 printk(KERN_WARNING "%s not supported\n", __func__);
540 return bridge->master_get(image, enabled, vme_base, size, aspace,
543 EXPORT_SYMBOL(vme_master_get);
546 * Read data out of VME space into a buffer.
548 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
551 struct vme_bridge *bridge = find_bridge(resource);
552 struct vme_master_resource *image;
555 if (bridge->master_read == NULL) {
556 printk(KERN_WARNING "Reading from resource not supported\n");
560 if (resource->type != VME_MASTER) {
561 printk(KERN_ERR "Not a master resource\n");
565 image = list_entry(resource->entry, struct vme_master_resource, list);
567 length = vme_get_size(resource);
569 if (offset > length) {
570 printk(KERN_WARNING "Invalid Offset\n");
574 if ((offset + count) > length)
575 count = length - offset;
577 return bridge->master_read(image, buf, count, offset);
580 EXPORT_SYMBOL(vme_master_read);
583 * Write data out to VME space from a buffer.
585 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
586 size_t count, loff_t offset)
588 struct vme_bridge *bridge = find_bridge(resource);
589 struct vme_master_resource *image;
592 if (bridge->master_write == NULL) {
593 printk(KERN_WARNING "Writing to resource not supported\n");
597 if (resource->type != VME_MASTER) {
598 printk(KERN_ERR "Not a master resource\n");
602 image = list_entry(resource->entry, struct vme_master_resource, list);
604 length = vme_get_size(resource);
606 if (offset > length) {
607 printk(KERN_WARNING "Invalid Offset\n");
611 if ((offset + count) > length)
612 count = length - offset;
614 return bridge->master_write(image, buf, count, offset);
616 EXPORT_SYMBOL(vme_master_write);
619 * Perform RMW cycle to provided location.
621 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
622 unsigned int compare, unsigned int swap, loff_t offset)
624 struct vme_bridge *bridge = find_bridge(resource);
625 struct vme_master_resource *image;
627 if (bridge->master_rmw == NULL) {
628 printk(KERN_WARNING "Writing to resource not supported\n");
632 if (resource->type != VME_MASTER) {
633 printk(KERN_ERR "Not a master resource\n");
637 image = list_entry(resource->entry, struct vme_master_resource, list);
639 return bridge->master_rmw(image, mask, compare, swap, offset);
641 EXPORT_SYMBOL(vme_master_rmw);
643 int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
645 struct vme_master_resource *image;
646 phys_addr_t phys_addr;
647 unsigned long vma_size;
649 if (resource->type != VME_MASTER) {
650 pr_err("Not a master resource\n");
654 image = list_entry(resource->entry, struct vme_master_resource, list);
655 phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
656 vma_size = vma->vm_end - vma->vm_start;
658 if (phys_addr + vma_size > image->bus_resource.end + 1) {
659 pr_err("Map size cannot exceed the window size\n");
663 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
665 return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
667 EXPORT_SYMBOL(vme_master_mmap);
669 void vme_master_free(struct vme_resource *resource)
671 struct vme_master_resource *master_image;
673 if (resource->type != VME_MASTER) {
674 printk(KERN_ERR "Not a master resource\n");
678 master_image = list_entry(resource->entry, struct vme_master_resource,
680 if (master_image == NULL) {
681 printk(KERN_ERR "Can't find master resource\n");
686 spin_lock(&master_image->lock);
687 if (master_image->locked == 0)
688 printk(KERN_ERR "Image is already free\n");
690 master_image->locked = 0;
691 spin_unlock(&master_image->lock);
693 /* Free up resource memory */
696 EXPORT_SYMBOL(vme_master_free);
699 * Request a DMA controller with specific attributes, return some unique
702 struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
704 struct vme_bridge *bridge;
705 struct list_head *dma_pos = NULL;
706 struct vme_dma_resource *allocated_ctrlr = NULL;
707 struct vme_dma_resource *dma_ctrlr = NULL;
708 struct vme_resource *resource = NULL;
710 /* XXX Not checking resource attributes */
711 printk(KERN_ERR "No VME resource Attribute tests done\n");
713 bridge = vdev->bridge;
714 if (bridge == NULL) {
715 printk(KERN_ERR "Can't find VME bus\n");
719 /* Loop through DMA resources */
720 list_for_each(dma_pos, &bridge->dma_resources) {
721 dma_ctrlr = list_entry(dma_pos,
722 struct vme_dma_resource, list);
724 if (dma_ctrlr == NULL) {
725 printk(KERN_ERR "Registered NULL DMA resource\n");
729 /* Find an unlocked and compatible controller */
730 mutex_lock(&dma_ctrlr->mtx);
731 if (((dma_ctrlr->route_attr & route) == route) &&
732 (dma_ctrlr->locked == 0)) {
734 dma_ctrlr->locked = 1;
735 mutex_unlock(&dma_ctrlr->mtx);
736 allocated_ctrlr = dma_ctrlr;
739 mutex_unlock(&dma_ctrlr->mtx);
742 /* Check to see if we found a resource */
743 if (allocated_ctrlr == NULL)
746 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
747 if (resource == NULL) {
748 printk(KERN_WARNING "Unable to allocate resource structure\n");
751 resource->type = VME_DMA;
752 resource->entry = &allocated_ctrlr->list;
758 mutex_lock(&dma_ctrlr->mtx);
759 dma_ctrlr->locked = 0;
760 mutex_unlock(&dma_ctrlr->mtx);
765 EXPORT_SYMBOL(vme_dma_request);
770 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
772 struct vme_dma_resource *ctrlr;
773 struct vme_dma_list *dma_list;
775 if (resource->type != VME_DMA) {
776 printk(KERN_ERR "Not a DMA resource\n");
780 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
782 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
783 if (dma_list == NULL) {
784 printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
787 INIT_LIST_HEAD(&dma_list->entries);
788 dma_list->parent = ctrlr;
789 mutex_init(&dma_list->mtx);
793 EXPORT_SYMBOL(vme_new_dma_list);
796 * Create "Pattern" type attributes
798 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
800 struct vme_dma_attr *attributes;
801 struct vme_dma_pattern *pattern_attr;
803 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
804 if (attributes == NULL) {
805 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
809 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
810 if (pattern_attr == NULL) {
811 printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
815 attributes->type = VME_DMA_PATTERN;
816 attributes->private = (void *)pattern_attr;
818 pattern_attr->pattern = pattern;
819 pattern_attr->type = type;
828 EXPORT_SYMBOL(vme_dma_pattern_attribute);
831 * Create "PCI" type attributes
833 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
835 struct vme_dma_attr *attributes;
836 struct vme_dma_pci *pci_attr;
838 /* XXX Run some sanity checks here */
840 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
841 if (attributes == NULL) {
842 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
846 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
847 if (pci_attr == NULL) {
848 printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
854 attributes->type = VME_DMA_PCI;
855 attributes->private = (void *)pci_attr;
857 pci_attr->address = address;
866 EXPORT_SYMBOL(vme_dma_pci_attribute);
869 * Create "VME" type attributes
871 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
872 u32 aspace, u32 cycle, u32 dwidth)
874 struct vme_dma_attr *attributes;
875 struct vme_dma_vme *vme_attr;
877 attributes = kmalloc(
878 sizeof(struct vme_dma_attr), GFP_KERNEL);
879 if (attributes == NULL) {
880 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
884 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
885 if (vme_attr == NULL) {
886 printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
890 attributes->type = VME_DMA_VME;
891 attributes->private = (void *)vme_attr;
893 vme_attr->address = address;
894 vme_attr->aspace = aspace;
895 vme_attr->cycle = cycle;
896 vme_attr->dwidth = dwidth;
905 EXPORT_SYMBOL(vme_dma_vme_attribute);
910 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
912 kfree(attributes->private);
915 EXPORT_SYMBOL(vme_dma_free_attribute);
917 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
918 struct vme_dma_attr *dest, size_t count)
920 struct vme_bridge *bridge = list->parent->parent;
923 if (bridge->dma_list_add == NULL) {
924 printk(KERN_WARNING "Link List DMA generation not supported\n");
928 if (!mutex_trylock(&list->mtx)) {
929 printk(KERN_ERR "Link List already submitted\n");
933 retval = bridge->dma_list_add(list, src, dest, count);
935 mutex_unlock(&list->mtx);
939 EXPORT_SYMBOL(vme_dma_list_add);
941 int vme_dma_list_exec(struct vme_dma_list *list)
943 struct vme_bridge *bridge = list->parent->parent;
946 if (bridge->dma_list_exec == NULL) {
947 printk(KERN_ERR "Link List DMA execution not supported\n");
951 mutex_lock(&list->mtx);
953 retval = bridge->dma_list_exec(list);
955 mutex_unlock(&list->mtx);
959 EXPORT_SYMBOL(vme_dma_list_exec);
961 int vme_dma_list_free(struct vme_dma_list *list)
963 struct vme_bridge *bridge = list->parent->parent;
966 if (bridge->dma_list_empty == NULL) {
967 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
971 if (!mutex_trylock(&list->mtx)) {
972 printk(KERN_ERR "Link List in use\n");
977 * Empty out all of the entries from the DMA list. We need to go to the
978 * low level driver as DMA entries are driver specific.
980 retval = bridge->dma_list_empty(list);
982 printk(KERN_ERR "Unable to empty link-list entries\n");
983 mutex_unlock(&list->mtx);
986 mutex_unlock(&list->mtx);
991 EXPORT_SYMBOL(vme_dma_list_free);
993 int vme_dma_free(struct vme_resource *resource)
995 struct vme_dma_resource *ctrlr;
997 if (resource->type != VME_DMA) {
998 printk(KERN_ERR "Not a DMA resource\n");
1002 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
1004 if (!mutex_trylock(&ctrlr->mtx)) {
1005 printk(KERN_ERR "Resource busy, can't free\n");
1009 if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
1010 printk(KERN_WARNING "Resource still processing transfers\n");
1011 mutex_unlock(&ctrlr->mtx);
1017 mutex_unlock(&ctrlr->mtx);
1023 EXPORT_SYMBOL(vme_dma_free);
1025 void vme_bus_error_handler(struct vme_bridge *bridge,
1026 unsigned long long address, int am)
1028 struct list_head *handler_pos = NULL;
1029 struct vme_error_handler *handler;
1030 int handler_triggered = 0;
1031 u32 aspace = vme_get_aspace(am);
1033 list_for_each(handler_pos, &bridge->vme_error_handlers) {
1034 handler = list_entry(handler_pos, struct vme_error_handler,
1036 if ((aspace == handler->aspace) &&
1037 (address >= handler->start) &&
1038 (address < handler->end)) {
1039 if (!handler->num_errors)
1040 handler->first_error = address;
1041 if (handler->num_errors != UINT_MAX)
1042 handler->num_errors++;
1043 handler_triggered = 1;
1047 if (!handler_triggered)
1048 dev_err(bridge->parent,
1049 "Unhandled VME access error at address 0x%llx\n",
1052 EXPORT_SYMBOL(vme_bus_error_handler);
1054 struct vme_error_handler *vme_register_error_handler(
1055 struct vme_bridge *bridge, u32 aspace,
1056 unsigned long long address, size_t len)
1058 struct vme_error_handler *handler;
1060 handler = kmalloc(sizeof(*handler), GFP_KERNEL);
1064 handler->aspace = aspace;
1065 handler->start = address;
1066 handler->end = address + len;
1067 handler->num_errors = 0;
1068 handler->first_error = 0;
1069 list_add_tail(&handler->list, &bridge->vme_error_handlers);
1073 EXPORT_SYMBOL(vme_register_error_handler);
1075 void vme_unregister_error_handler(struct vme_error_handler *handler)
1077 list_del(&handler->list);
1080 EXPORT_SYMBOL(vme_unregister_error_handler);
1082 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1084 void (*call)(int, int, void *);
1087 call = bridge->irq[level - 1].callback[statid].func;
1088 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1091 call(level, statid, priv_data);
1093 printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1096 EXPORT_SYMBOL(vme_irq_handler);
1098 int vme_irq_request(struct vme_dev *vdev, int level, int statid,
1099 void (*callback)(int, int, void *),
1102 struct vme_bridge *bridge;
1104 bridge = vdev->bridge;
1105 if (bridge == NULL) {
1106 printk(KERN_ERR "Can't find VME bus\n");
1110 if ((level < 1) || (level > 7)) {
1111 printk(KERN_ERR "Invalid interrupt level\n");
1115 if (bridge->irq_set == NULL) {
1116 printk(KERN_ERR "Configuring interrupts not supported\n");
1120 mutex_lock(&bridge->irq_mtx);
1122 if (bridge->irq[level - 1].callback[statid].func) {
1123 mutex_unlock(&bridge->irq_mtx);
1124 printk(KERN_WARNING "VME Interrupt already taken\n");
1128 bridge->irq[level - 1].count++;
1129 bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1130 bridge->irq[level - 1].callback[statid].func = callback;
1132 /* Enable IRQ level */
1133 bridge->irq_set(bridge, level, 1, 1);
1135 mutex_unlock(&bridge->irq_mtx);
1139 EXPORT_SYMBOL(vme_irq_request);
1141 void vme_irq_free(struct vme_dev *vdev, int level, int statid)
1143 struct vme_bridge *bridge;
1145 bridge = vdev->bridge;
1146 if (bridge == NULL) {
1147 printk(KERN_ERR "Can't find VME bus\n");
1151 if ((level < 1) || (level > 7)) {
1152 printk(KERN_ERR "Invalid interrupt level\n");
1156 if (bridge->irq_set == NULL) {
1157 printk(KERN_ERR "Configuring interrupts not supported\n");
1161 mutex_lock(&bridge->irq_mtx);
1163 bridge->irq[level - 1].count--;
1165 /* Disable IRQ level if no more interrupts attached at this level*/
1166 if (bridge->irq[level - 1].count == 0)
1167 bridge->irq_set(bridge, level, 0, 1);
1169 bridge->irq[level - 1].callback[statid].func = NULL;
1170 bridge->irq[level - 1].callback[statid].priv_data = NULL;
1172 mutex_unlock(&bridge->irq_mtx);
1174 EXPORT_SYMBOL(vme_irq_free);
1176 int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1178 struct vme_bridge *bridge;
1180 bridge = vdev->bridge;
1181 if (bridge == NULL) {
1182 printk(KERN_ERR "Can't find VME bus\n");
1186 if ((level < 1) || (level > 7)) {
1187 printk(KERN_WARNING "Invalid interrupt level\n");
1191 if (bridge->irq_generate == NULL) {
1192 printk(KERN_WARNING "Interrupt generation not supported\n");
1196 return bridge->irq_generate(bridge, level, statid);
1198 EXPORT_SYMBOL(vme_irq_generate);
1201 * Request the location monitor, return resource or NULL
1203 struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1205 struct vme_bridge *bridge;
1206 struct list_head *lm_pos = NULL;
1207 struct vme_lm_resource *allocated_lm = NULL;
1208 struct vme_lm_resource *lm = NULL;
1209 struct vme_resource *resource = NULL;
1211 bridge = vdev->bridge;
1212 if (bridge == NULL) {
1213 printk(KERN_ERR "Can't find VME bus\n");
1217 /* Loop through DMA resources */
1218 list_for_each(lm_pos, &bridge->lm_resources) {
1219 lm = list_entry(lm_pos,
1220 struct vme_lm_resource, list);
1223 printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1227 /* Find an unlocked controller */
1228 mutex_lock(&lm->mtx);
1229 if (lm->locked == 0) {
1231 mutex_unlock(&lm->mtx);
1235 mutex_unlock(&lm->mtx);
1238 /* Check to see if we found a resource */
1239 if (allocated_lm == NULL)
1242 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1243 if (resource == NULL) {
1244 printk(KERN_ERR "Unable to allocate resource structure\n");
1247 resource->type = VME_LM;
1248 resource->entry = &allocated_lm->list;
1254 mutex_lock(&lm->mtx);
1256 mutex_unlock(&lm->mtx);
1261 EXPORT_SYMBOL(vme_lm_request);
1263 int vme_lm_count(struct vme_resource *resource)
1265 struct vme_lm_resource *lm;
1267 if (resource->type != VME_LM) {
1268 printk(KERN_ERR "Not a Location Monitor resource\n");
1272 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1274 return lm->monitors;
1276 EXPORT_SYMBOL(vme_lm_count);
1278 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1279 u32 aspace, u32 cycle)
1281 struct vme_bridge *bridge = find_bridge(resource);
1282 struct vme_lm_resource *lm;
1284 if (resource->type != VME_LM) {
1285 printk(KERN_ERR "Not a Location Monitor resource\n");
1289 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1291 if (bridge->lm_set == NULL) {
1292 printk(KERN_ERR "vme_lm_set not supported\n");
1296 return bridge->lm_set(lm, lm_base, aspace, cycle);
1298 EXPORT_SYMBOL(vme_lm_set);
1300 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1301 u32 *aspace, u32 *cycle)
1303 struct vme_bridge *bridge = find_bridge(resource);
1304 struct vme_lm_resource *lm;
1306 if (resource->type != VME_LM) {
1307 printk(KERN_ERR "Not a Location Monitor resource\n");
1311 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1313 if (bridge->lm_get == NULL) {
1314 printk(KERN_ERR "vme_lm_get not supported\n");
1318 return bridge->lm_get(lm, lm_base, aspace, cycle);
1320 EXPORT_SYMBOL(vme_lm_get);
1322 int vme_lm_attach(struct vme_resource *resource, int monitor,
1323 void (*callback)(void *), void *data)
1325 struct vme_bridge *bridge = find_bridge(resource);
1326 struct vme_lm_resource *lm;
1328 if (resource->type != VME_LM) {
1329 printk(KERN_ERR "Not a Location Monitor resource\n");
1333 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1335 if (bridge->lm_attach == NULL) {
1336 printk(KERN_ERR "vme_lm_attach not supported\n");
1340 return bridge->lm_attach(lm, monitor, callback, data);
1342 EXPORT_SYMBOL(vme_lm_attach);
1344 int vme_lm_detach(struct vme_resource *resource, int monitor)
1346 struct vme_bridge *bridge = find_bridge(resource);
1347 struct vme_lm_resource *lm;
1349 if (resource->type != VME_LM) {
1350 printk(KERN_ERR "Not a Location Monitor resource\n");
1354 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1356 if (bridge->lm_detach == NULL) {
1357 printk(KERN_ERR "vme_lm_detach not supported\n");
1361 return bridge->lm_detach(lm, monitor);
1363 EXPORT_SYMBOL(vme_lm_detach);
1365 void vme_lm_free(struct vme_resource *resource)
1367 struct vme_lm_resource *lm;
1369 if (resource->type != VME_LM) {
1370 printk(KERN_ERR "Not a Location Monitor resource\n");
1374 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1376 mutex_lock(&lm->mtx);
1379 * Check to see that there aren't any callbacks still attached, if
1380 * there are we should probably be detaching them!
1385 mutex_unlock(&lm->mtx);
1389 EXPORT_SYMBOL(vme_lm_free);
1391 int vme_slot_num(struct vme_dev *vdev)
1393 struct vme_bridge *bridge;
1395 bridge = vdev->bridge;
1396 if (bridge == NULL) {
1397 printk(KERN_ERR "Can't find VME bus\n");
1401 if (bridge->slot_get == NULL) {
1402 printk(KERN_WARNING "vme_slot_num not supported\n");
1406 return bridge->slot_get(bridge);
1408 EXPORT_SYMBOL(vme_slot_num);
1410 int vme_bus_num(struct vme_dev *vdev)
1412 struct vme_bridge *bridge;
1414 bridge = vdev->bridge;
1415 if (bridge == NULL) {
1416 pr_err("Can't find VME bus\n");
1422 EXPORT_SYMBOL(vme_bus_num);
1424 /* - Bridge Registration --------------------------------------------------- */
/* Device-model release callback: frees the containing vme_dev. */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1431 /* Common bridge initialization */
1432 struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
1434 INIT_LIST_HEAD(&bridge->vme_error_handlers);
1435 INIT_LIST_HEAD(&bridge->master_resources);
1436 INIT_LIST_HEAD(&bridge->slave_resources);
1437 INIT_LIST_HEAD(&bridge->dma_resources);
1438 INIT_LIST_HEAD(&bridge->lm_resources);
1439 mutex_init(&bridge->irq_mtx);
1443 EXPORT_SYMBOL(vme_init_bridge);
1445 int vme_register_bridge(struct vme_bridge *bridge)
1450 mutex_lock(&vme_buses_lock);
1451 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1452 if ((vme_bus_numbers & (1 << i)) == 0) {
1453 vme_bus_numbers |= (1 << i);
1455 INIT_LIST_HEAD(&bridge->devices);
1456 list_add_tail(&bridge->bus_list, &vme_bus_list);
1461 mutex_unlock(&vme_buses_lock);
1465 EXPORT_SYMBOL(vme_register_bridge);
1467 void vme_unregister_bridge(struct vme_bridge *bridge)
1469 struct vme_dev *vdev;
1470 struct vme_dev *tmp;
1472 mutex_lock(&vme_buses_lock);
1473 vme_bus_numbers &= ~(1 << bridge->num);
1474 list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
1475 list_del(&vdev->drv_list);
1476 list_del(&vdev->bridge_list);
1477 device_unregister(&vdev->dev);
1479 list_del(&bridge->bus_list);
1480 mutex_unlock(&vme_buses_lock);
1482 EXPORT_SYMBOL(vme_unregister_bridge);
1484 /* - Driver Registration --------------------------------------------------- */
1486 static int __vme_register_driver_bus(struct vme_driver *drv,
1487 struct vme_bridge *bridge, unsigned int ndevs)
1491 struct vme_dev *vdev;
1492 struct vme_dev *tmp;
1494 for (i = 0; i < ndevs; i++) {
1495 vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1501 vdev->bridge = bridge;
1502 vdev->dev.platform_data = drv;
1503 vdev->dev.release = vme_dev_release;
1504 vdev->dev.parent = bridge->parent;
1505 vdev->dev.bus = &vme_bus_type;
1506 dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1509 err = device_register(&vdev->dev);
1513 if (vdev->dev.platform_data) {
1514 list_add_tail(&vdev->drv_list, &drv->devices);
1515 list_add_tail(&vdev->bridge_list, &bridge->devices);
1517 device_unregister(&vdev->dev);
1522 put_device(&vdev->dev);
1525 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1526 list_del(&vdev->drv_list);
1527 list_del(&vdev->bridge_list);
1528 device_unregister(&vdev->dev);
1533 static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1535 struct vme_bridge *bridge;
1538 mutex_lock(&vme_buses_lock);
1539 list_for_each_entry(bridge, &vme_bus_list, bus_list) {
1541 * This cannot cause trouble as we already have vme_buses_lock
1542 * and if the bridge is removed, it will have to go through
1543 * vme_unregister_bridge() to do it (which calls remove() on
1544 * the bridge which in turn tries to acquire vme_buses_lock and
1545 * will have to wait).
1547 err = __vme_register_driver_bus(drv, bridge, ndevs);
1551 mutex_unlock(&vme_buses_lock);
1555 int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1559 drv->driver.name = drv->name;
1560 drv->driver.bus = &vme_bus_type;
1561 INIT_LIST_HEAD(&drv->devices);
1563 err = driver_register(&drv->driver);
1567 err = __vme_register_driver(drv, ndevs);
1569 driver_unregister(&drv->driver);
1573 EXPORT_SYMBOL(vme_register_driver);
1575 void vme_unregister_driver(struct vme_driver *drv)
1577 struct vme_dev *dev, *dev_tmp;
1579 mutex_lock(&vme_buses_lock);
1580 list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1581 list_del(&dev->drv_list);
1582 list_del(&dev->bridge_list);
1583 device_unregister(&dev->dev);
1585 mutex_unlock(&vme_buses_lock);
1587 driver_unregister(&drv->driver);
1589 EXPORT_SYMBOL(vme_unregister_driver);
1591 /* - Bus Registration ------------------------------------------------------ */
1593 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1595 struct vme_driver *vme_drv;
1597 vme_drv = container_of(drv, struct vme_driver, driver);
1599 if (dev->platform_data == vme_drv) {
1600 struct vme_dev *vdev = dev_to_vme_dev(dev);
1602 if (vme_drv->match && vme_drv->match(vdev))
1605 dev->platform_data = NULL;
1610 static int vme_bus_probe(struct device *dev)
1612 int retval = -ENODEV;
1613 struct vme_driver *driver;
1614 struct vme_dev *vdev = dev_to_vme_dev(dev);
1616 driver = dev->platform_data;
1618 if (driver->probe != NULL)
1619 retval = driver->probe(vdev);
1624 struct bus_type vme_bus_type = {
1626 .match = vme_bus_match,
1627 .probe = vme_bus_probe,
1629 EXPORT_SYMBOL(vme_bus_type);
1631 static int __init vme_init(void)
1633 return bus_register(&vme_bus_type);
1635 subsys_initcall(vme_init);