/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
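/*
 * Worked example for the SHMLBA handling above (a sketch, assuming a
 * platform with PAGE_SIZE = 4 KiB and SHMLBA = 16 KiB, e.g. 32-bit ARM):
 *
 *	bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3
 *
 * so the low 3 bits of (user_token >> PAGE_SHIFT) are OR'ed into 'add',
 * and drm_ht_just_insert_please() is asked to keep those bits fixed
 * (shift = 3).  The hashed handle therefore mmap()s to a user address
 * with the same cache-colouring bits as the original kernel address.
 */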
/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps.  They are allocated here so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) ||
	    (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock =
				map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * That's not always the case, as AGP can be in the control
		 * of user space (i.e. the i810 driver).  In that case this
		 * loop gets skipped, so we double check that dev->agp->memory
		 * is actually set, as well as being invalid, before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			     entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first.
		 */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}
	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);
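/*
 * A minimal driver-side sketch of drm_legacy_addmap() usage (illustrative
 * only; the helper name and BAR layout are hypothetical, assuming a legacy
 * UMS driver that has already probed its PCI device):
 */
static int example_map_mmio(struct drm_device *dev,
			    resource_size_t bar_base, unsigned int bar_len)
{
	struct drm_local_map *mmio;
	int ret;

	/* Map the MMIO register BAR; the core ioremap()s it for us. */
	ret = drm_legacy_addmap(dev, bar_base, bar_len,
				_DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
	if (ret)
		return ret;

	DRM_DEBUG("registers mapped at kernel address %p\n", mmio->handle);
	return 0;
}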
/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);
	if (err)
		return err;

	/* avoid a warning on 64-bit; the cast isn't pretty, but the
	 * user-space API was set in stone too long ago to change */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long)r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
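#if 0	/* Userspace-side sketch (illustrative, not compiled here): walking
	 * the map list by index with DRM_IOCTL_GET_MAP.  Note that on input
	 * drm_map::offset carries the list index, not an address. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void dump_maps(int drm_fd)
{
	struct drm_map map;
	int idx;

	for (idx = 0; ; idx++) {
		memset(&map, 0, sizeof(map));
		map.offset = idx;	/* index in, description out */
		if (ioctl(drm_fd, DRM_IOCTL_GET_MAP, &map))
			break;		/* past the end of the list */
		printf("map %d: type=%d size=%lu handle=%p\n",
		       idx, map.type, map.size, map.handle);
	}
}
#endif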
/*
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);
void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
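	/*
	 * Worked example of the sizing math above (the request values are
	 * hypothetical): request->size = 65536 gives order = 16 and
	 * size = 1 << 16 = 65536.  With PAGE_SHIFT = 12 that makes
	 * page_order = 16 - 12 = 4, so total = PAGE_SIZE << 4 = 65536
	 * bytes per allocation unit; with _DRM_PAGE_ALIGN set, alignment
	 * is PAGE_ALIGN(65536) = 65536, i.e. buffers sit at 64 KiB strides.
	 */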
	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		     agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
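/*
 * A minimal sketch of a kernel-side caller of drm_legacy_addbufs_agp()
 * (illustrative only; the request values are hypothetical and assume the
 * relevant AGP range was bound beforehand):
 */
static int example_add_agp_bufs(struct drm_device *dev)
{
	struct drm_buf_desc desc = {
		.count = 32,			/* 32 buffers... */
		.size = 65536,			/* ...of 64 KiB each */
		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
		.agp_start = 0,			/* offset into the aperture */
	};

	/* On success, desc.count and desc.size report what was allocated. */
	return drm_legacy_addbufs_agp(dev, &desc);
}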
#endif /* CONFIG_AGP */
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);
	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
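#if 0	/* Userspace-side sketch (illustrative, not compiled here): asking for
	 * PCI DMA buffers through DRM_IOCTL_ADD_BUFS.  CAP_SYS_ADMIN is
	 * required for the PCI path, per drm_legacy_addbufs_pci() above. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int add_pci_bufs(int drm_fd)
{
	struct drm_buf_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.count = 16;		/* request 16 buffers... */
	desc.size = 4096;		/* ...of one page each */
	desc.flags = _DRM_PAGE_ALIGN;	/* no AGP/SG flag: PCI path */

	if (ioctl(drm_fd, DRM_IOCTL_ADD_BUFS, &desc))
		return -1;
	printf("got %d buffers of %d bytes\n", desc.count, desc.size);
	return 0;
}
#endif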
/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
					&request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}
/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}
/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
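#if 0	/* Userspace-side sketch (illustrative, not compiled here): mapping
	 * every DMA buffer in one go with DRM_IOCTL_MAP_BUFS.  The list must
	 * be sized to at least the kernel's buffer count. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int map_all_bufs(int drm_fd, int count)
{
	struct drm_buf_map bm;

	bm.count = count;
	bm.virtual = NULL;
	bm.list = calloc(count, sizeof(*bm.list));
	if (!bm.list)
		return -1;
	/* On success, each bm.list[i].address points into the new mapping. */
	return ioctl(drm_fd, DRM_IOCTL_MAP_BUFS, &bm);
}
#endif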
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
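/*
 * A minimal driver-side sketch of drm_legacy_getsarea() usage (illustrative
 * only; the helper name is hypothetical):
 */
static int example_find_sarea(struct drm_device *dev)
{
	struct drm_local_map *sarea = drm_legacy_getsarea(dev);

	if (!sarea)
		return -ENODEV;	/* no SAREA; the X server never added one */

	/* sarea->handle is the kernel mapping of the shared SAREA pages,
	 * which also hold the hardware lock (_DRM_CONTAINS_LOCK). */
	DRM_DEBUG("SAREA at %p, %lu bytes\n", sarea->handle, sarea->size);
	return 0;
}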