/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}
	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
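
/*
 * Worked example (illustrative, not from the original file): on a platform
 * where SHMLBA is 4 * PAGE_SIZE, SHMLBA >> PAGE_SHIFT == 4, so
 * bits = ilog2(4) + 1 = 3.  The low three bits of (user_token >> PAGE_SHIFT)
 * are OR'ed into 'add', and 'shift' tells drm_ht_just_insert_please() to
 * leave those low bits of the key it picks untouched.  The resulting handle
 * then has the same low alignment bits as the original kernel virtual
 * address, so the later mmap() cannot produce a cache-aliasing user mapping.
 */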
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kmalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
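
/*
 * Illustrative sketch (not part of the original file): how a driver's load
 * hook might use drm_addmap() to expose its MMIO range.  The helper name
 * and the choice of PCI BAR 0 are assumptions for illustration only.
 */
#if 0	/* example only */
static int example_map_mmio(struct drm_device *dev)
{
	struct drm_local_map *regs;

	/* _DRM_REGISTERS maps are ioremap()ed inside drm_addmap_core(),
	 * so regs->handle is a kernel virtual address on success. */
	return drm_addmap(dev, pci_resource_start(dev->pdev, 0),
			  pci_resource_len(dev->pdev, 0),
			  _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
}
#endif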
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the
	 * API is set, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
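
/*
 * Illustrative sketch (assumption, not original code): drm_rmmap() is the
 * natural counterpart to drm_addmap() in a driver's unload path; it takes
 * struct_mutex itself, drops the hash entry and, for _DRM_REGISTERS maps,
 * iounmap()s the handle before freeing the map.
 */
#if 0	/* example only */
static void example_unmap_mmio(struct drm_device *dev,
			       struct drm_local_map *regs)
{
	drm_rmmap(dev, regs);
}
#endif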
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
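
/*
 * Illustrative sketch (assumption, not original code): a driver requesting
 * 32 page-aligned 64 KiB DMA buffers at the start of its AGP aperture.
 * All field values are examples only.
 */
#if 0	/* example only */
static int example_add_agp_bufs(struct drm_device *dev)
{
	struct drm_buf_desc req = {
		.count = 32,
		.size = 64 * 1024,	/* rounded to 1 << drm_order(size) */
		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = 0,		/* offset from dev->agp->base */
	};

	return drm_addbufs_agp(dev, &req);
}
#endif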
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
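
/*
 * Worked example of the sizing math above, assuming PAGE_SIZE = 4096
 * (PAGE_SHIFT = 12): request->size = 20000 gives order = drm_order(20000)
 * = 15 and size = 1 << 15 = 32768, so page_order = 15 - 12 = 3 and each
 * drm_pci_alloc() segment is PAGE_SIZE << 3 = 32768 bytes, holding exactly
 * one buffer.  A smaller request (e.g. size = 2048 without _DRM_PAGE_ALIGN)
 * gets page_order = 0 and packs two 2048-byte buffers into each
 * 4096-byte segment.
 */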
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to drm_addbufs_agp(),
 * drm_addbufs_sg() or drm_addbufs_fb() for AGP, scatter-gather or
 * framebuffer memory, and to drm_addbufs_pci() otherwise.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
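
/*
 * Illustrative sketch (assumption, not original code): the userspace side
 * of this ioctl usually goes through libdrm's drmAddBufs(), whose flags
 * select the branch above; e.g. DRM_SG_BUFFER lands in drm_addbufs_sg()
 * and no flag falls through to drm_addbufs_pci().
 */
#if 0	/* example only, userspace */
	int count = drmAddBufs(fd, 32, 65536, DRM_SG_BUFFER, 0);
	if (count < 0)
		return count;	/* negative errno */
#endif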
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/**
 * Specifies low and high water marks for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
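
/*
 * Illustrative sketch (assumption, not original code): userspace normally
 * reaches this ioctl through libdrm, which wraps the copied-out index,
 * size and address of every buffer in a drmBufMap.
 */
#if 0	/* example only, userspace */
	drmBufMapPtr bufs = drmMapBufs(fd);
	if (bufs) {
		/* bufs->list[i].address is the client-virtual mapping */
		drmUnmapBufs(bufs);
	}
#endif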
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
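
/*
 * Worked examples (for illustration): drm_order(4096) == 12,
 * drm_order(4097) == 13 and drm_order(16384) == 14; exact powers of two
 * map to their exponent, anything in between rounds up to the next power
 * of two.
 */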