/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
					     drm_local_map_t * map)
{
	struct list_head *list;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
		if (entry->map && map->type == entry->map->type &&
		    entry->map->offset == map->offset) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
					drm_device_t * dev)
{
	static unsigned int map32_handle = START_RANGE;
	unsigned int hash;

	if (lhandle & 0xffffffff00000000) {
		hash = map32_handle;
		map32_handle += PAGE_SIZE;
		if (map32_handle > END_RANGE)
			map32_handle = START_RANGE;
	} else
		hash = lhandle;

	while (1) {
		drm_map_list_t *_entry;
		list_for_each_entry(_entry, &dev->maplist->head, head) {
			if (_entry->user_token == hash)
				break;
		}
		if (&_entry->head == &dev->maplist->head)
			return hash;

		hash += PAGE_SIZE;
		map32_handle += PAGE_SIZE;
	}
}
#else
# define HandleID(x,dev)	(unsigned int)(x)
#endif

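/*
 * Illustrative example (an addition, not part of the original file): on a
 * 64-bit kernel a map whose kernel address is, say, 0xffff810012345000 has
 * high bits set, so HandleID() above would hand out the next free page-sized
 * token in [START_RANGE, END_RANGE), e.g. 0x10000000, 0x10001000, ...
 * On 32-bit kernels the token is simply the address itself.
 */
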
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
			   unsigned int size, drm_map_type_t type,
			   drm_map_flags_t flags, drm_map_list_t ** maplist)
{
	drm_map_t *map;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + map->size < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap(map->offset, map->size, dev);
		break;
	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_sem protects the increment */
	list->user_token = HandleID(map->type == _DRM_SHM
				    ? (unsigned long)map->handle
				    : map->offset, dev);
	up(&dev->struct_sem);

	*maplist = list;
	return 0;
}

int drm_addmap(drm_device_t * dev, unsigned int offset,
	       unsigned int size, drm_map_type_t type,
	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
	drm_map_list_t *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);

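/*
 * Illustrative usage sketch (an addition, not part of the original file):
 * a driver's initialization path might register its register BAR and later
 * tear the mapping down again.  The BAR number and flags are hypothetical.
 *
 *	drm_local_map_t *map;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *	if (!ret) {
 *		... use map->handle for register access ...
 *		drm_rmmap(dev, map);
 *	}
 */
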
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t map;
	drm_map_list_t *maplist;
	drm_map_t __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
		return -EFAULT;
	if (put_user((void *)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
{
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_dma_handle_t dmah;

	/* Find the list entry for the map and remove it */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map == map) {
			list_del(list);
			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map->handle, map->size, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
{
	int ret;

	down(&dev->struct_sem);
	ret = drm_rmmap_locked(dev, map);
	up(&dev->struct_sem);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t request;
	drm_local_map_t *map = NULL;
	struct list_head *list;
	int ret;

	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		up(&dev->struct_sem);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	up(&dev->struct_sem);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order, DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

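	/*
	 * Worked example (an addition, not from the original code): for a
	 * hypothetical request->size of 65536 with 4 KiB pages, drm_order()
	 * yields order = 16, so size = 65536, page_order = 16 - 12 = 4, and
	 * total = PAGE_SIZE << 4 = 65536 bytes per allocation unit.
	 */
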
	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);

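/*
 * Illustrative sketch (an addition, not part of the original file): a driver
 * that has already set up its AGP aperture might request buffers like this.
 * All values are hypothetical.
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 32;				// buffers wanted
 *	req.size = 65536;			// bytes per buffer
 *	req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = 0;			// offset into the AGP aperture
 *	ret = drm_addbufs_agp(dev, &req);	// on success req.count/req.size
 *						// report what was allocated
 */
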
#endif				/* __OS_HAS_AGP */

int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		page = drm_alloc_pages(page_order, DRM_MEM_DMA);
		if (!page) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = page + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				up(&dev->struct_sem);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

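/*
 * Illustrative sketch (an addition, not part of the original file): plain
 * PCI DMA buffers are requested the same way, just without the AGP flag.
 * Values are hypothetical.
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count = 16;
 *	req.size = 4096;
 *	req.flags = _DRM_PAGE_ALIGN;
 *	ret = drm_addbufs_pci(dev, &req);
 *
 * Note that drm_addbufs_pci() keeps the old dma->pagelist in place until
 * every allocation has succeeded, so a mid-loop failure rolls back without
 * corrupting the existing buffer bookkeeping.
 */
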
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				drm_buf_desc_t __user *to =
				    &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_desc_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (drm_buf_free_t __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as a request to map the DMA
 * buffer region.
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}

			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}

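/*
 * Illustrative sketch (an addition, not part of the original file): from
 * userspace this ioctl is normally reached through libdrm's drmMapBufs(),
 * which issues the request and exposes per-buffer client addresses:
 *
 *	drmBufMapPtr bufs = drmMapBufs(fd);	// fd: open DRM device node
 *	if (bufs)
 *		... bufs->list[i].address is valid in this process ...
 */
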
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
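
/*
 * Worked examples (an addition, not part of the original file):
 *	drm_order(4096) == 12	(4096 is exactly 1 << 12)
 *	drm_order(4097) == 13	(rounded up to the next power of two)
 *	drm_order(1)    == 0
 */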