/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 *
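 * Worked example (illustrative, not part of the original source; assumes
 * 4 KiB pages, i.e. PAGE_SIZE == 4096):
 * \code
 *	drm_order(1)    == 0	// 2^0 == 1
 *	drm_order(4096) == 12	// already a power of two
 *	drm_order(4097) == 13	// rounded up to the next power of two
 * \endcode
 */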
int drm_order( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);

/*
 * Used to allocate 32-bit handles for _DRM_SHM regions.
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 *
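 * A minimal user-space sketch of this ioctl (illustrative only; fd is an
 * already-open DRM file descriptor, and reg_base/reg_size are hypothetical
 * names for a page-aligned register aperture):
 * \code
 *	drm_map_t map = {0};
 *	map.offset = reg_base;
 *	map.size   = reg_size;
 *	map.type   = _DRM_REGISTERS;
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *	// on success the kernel copies the updated map back; its offset is
 *	// the token later handed to mmap()
 * \endcode
 */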
int drm_addmap( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map;
	drm_map_t __user *argp = (void __user *)arg;
	drm_map_list_t *list;

	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EFAULT;
	}
	/* Only allow shared memory to be removable since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr   = -1;
	map->handle = NULL;
	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap( map->offset, map->size,
						   dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order( map->size ), map->handle );
		if ( !map->handle ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				drm_free( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
	{
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * Since we limit the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
		dma_addr_t bus_addr;

		map->handle = drm_pci_alloc(dev, map->size, map->size,
					    0xffffffffUL, &bus_addr);
		map->offset = (unsigned long)bus_addr;
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		break;
	}
	default:
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;
	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);

	/* Assign a 32-bit handle for _DRM_SHM mappings */
	/* We do it here so that dev->struct_sem protects the increment */
	if (map->type == _DRM_SHM)
		map->offset = map32_handle += PAGE_SIZE;

	up(&dev->struct_sem);

	if ( copy_to_user( argp, map, sizeof(*map) ) )
		return -EFAULT;
	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRR's) if it is not.
 *
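 * Illustrative sketch (same assumptions as the drm_addmap() example; only
 * maps created with _DRM_REMOVABLE qualify):
 * \code
 *	drm_map_t map = {0};
 *	map.handle = shm_handle;	// hypothetical handle of a _DRM_SHM map
 *	ioctl(fd, DRM_IOCTL_RM_MAP, &map);
 * \endcode
 */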
int drm_rmmap(struct inode *inode, struct file *filp,
	      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;
	if (copy_from_user(&request, (drm_map_t __user *)arg,
			   sizeof(request)))
		return -EFAULT;
	down(&dev->struct_sem);
	list = &dev->maplist->head;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->map->offset == (unsigned long) request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE)
			break;
	}
	/* List has wrapped around to the head pointer, or it is empty
	 * and we didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	list_del(list);
	drm_free(list, sizeof(*list), DRM_MEM_MAPS);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
	}

	if (!found_maps) {
		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if (drm_core_has_MTRR(dev)) {
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
			}
			drm_ioremapfree(map->handle, map->size, dev);
			break;
		case _DRM_SHM:
			vfree(map->handle);
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		case _DRM_CONSISTENT:
			drm_pci_free(dev, map->size, map->handle, map->offset);
			break;
		}
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	}
	up(&dev->struct_sem);
	return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order,
					       DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist),
			 DRM_MEM_SEGS);

		entry->seg_count = 0;
	}
	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist),
			 DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 *
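 * Worked example of the size computation below (illustrative, not part of
 * the original source; assumes PAGE_SHIFT == 12, i.e. 4 KiB pages, and
 * request->size == 65536):
 * \code
 *	order      = drm_order(65536);		// == 16
 *	size       = 1 << order;		// == 65536
 *	page_order = order - PAGE_SHIFT;	// == 4
 *	total      = PAGE_SIZE << page_order;	// == 65536
 * \endcode
 * Each buffer therefore spans sixteen contiguous pages of AGP space.
 */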
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;
	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}
	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;
		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __OS_HAS_AGP */

static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}
	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
				    DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
	if (!temp_pagelist) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		drm_free( entry->seglist,
			  count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );
	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = drm_alloc_pages( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free( temp_pagelist,
				  (dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist),
				  DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;

			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp = NULL;
			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc( buf->dev_priv_size,
						      DRM_MEM_BUFS );
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free( temp_pagelist,
					  (dma->page_count + (count << page_order))
					  * sizeof(*dma->pagelist),
					  DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free( temp_pagelist,
			  (dma->page_count + (count << page_order))
			  * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;
}

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;
	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );
	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}
	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;
		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;
	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;
	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);
	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}
	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;
		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 *
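 * Minimal user-space sketch of this dispatch (illustrative only; fd is an
 * open DRM descriptor and agp_start is a hypothetical driver-chosen offset):
 * \code
 *	drm_buf_desc_t req = {0};
 *	req.count     = 32;
 *	req.size      = 65536;	// rounded up via drm_order()
 *	req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = agp_start;
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *	// on success req.count/req.size report what was actually allocated
 * \endcode
 */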
int drm_addbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		ret = drm_addbufs_sg(dev, &request);
	else if ( request.flags & _DRM_FB_BUFFER )
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);
	if (copy_to_user((void __user *)arg, &request,
			 sizeof(request)))
		return -EFAULT;

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 *
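 * Typical two-call pattern from user space (illustrative only): the first
 * call, made with count == 0, merely reports how many entries exist; the
 * second call fills a caller-allocated list.
 * \code
 *	drm_buf_info_t info = {0};
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	// info.count set by the kernel
 *	info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	// info.list[] now filled in
 * \endcode
 */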
int drm_infobufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;
	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );
	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 *
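 * Illustrative user-space sketch (the watermark values are arbitrary
 * examples):
 * \code
 *	drm_buf_desc_t req = {0};
 *	req.size      = 65536;	// selects the bucket via drm_order()
 *	req.low_mark  = 4;
 *	req.high_mark = 16;
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &req);
 * \endcode
 */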
int drm_markbufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = drm_order( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 *
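 * Illustrative sketch of returning two reserved buffers by index (the
 * indices are arbitrary example values):
 * \code
 *	int idx[2] = { 3, 4 };
 *	drm_buf_free_t req = { .count = 2, .list = idx };
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 * \endcode
 */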
int drm_freebufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;
	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		drm_free_buffer( dev, buf );
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 *
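 * User-space sketch (illustrative only; expected_count is a hypothetical
 * name for the total number of DMA buffers, which must be at least
 * dma->buf_count): on success each list entry's address field points into
 * the freshly mapped region.
 * \code
 *	drm_buf_map_t bufs = {0};
 *	bufs.count = expected_count;
 *	bufs.list  = calloc(expected_count, sizeof(drm_buf_pub_t));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs);
 * \endcode
 */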
int drm_mapbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;
	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;
	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;
		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}