/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
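
/*
 * Worked example (illustrative, not part of the driver): drm_order()
 * rounds a size up to a power of two and returns the exponent, so
 *
 *        drm_order(4096) == 12    (4096 == 1 << 12, exact power of two)
 *        drm_order(4097) == 13    (rounded up to 8192)
 *        drm_order(1)    == 0
 *
 * The loop computes floor(log2(size)); the (size & (size - 1)) test is
 * nonzero exactly when size is not a power of two, in which case the
 * order is bumped by one to round up.
 */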

#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Register a range of memory so that it is available for mapping by a
 * non-root process.
 *
 * \param dev DRM device.
 * \param offset offset of the region, interpreted according to the map type.
 * \param size size of the region.
 * \param type memory map type.
 * \param flags memory map flags.
 * \param map_ptr output argument that receives a pointer to the new map.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                    dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
        /* Assign a 32-bit handle for _DRM_SHM mappings */
        /* We do it here so that dev->struct_sem protects the increment */
        if (map->type == _DRM_SHM)
                map->offset = map32_handle += PAGE_SIZE;
#endif
        up(&dev->struct_sem);

        *map_ptr = map;
        return 0;
}
EXPORT_SYMBOL(drm_addmap);
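
/*
 * Usage sketch (illustrative only, not part of the DRM core): with
 * drm_addmap()/drm_rmmap() exported, a driver can set up a map directly
 * instead of relying on the ioctl.  The flags and size below are
 * hypothetical.
 *
 *        drm_local_map_t *map;
 *        int ret;
 *
 *        ret = drm_addmap(dev, 0, PAGE_SIZE, _DRM_SHM,
 *                         _DRM_REMOVABLE | _DRM_CONTAINS_LOCK, &map);
 *        if (ret)
 *                return ret;
 *        ...
 *        drm_rmmap(dev, map->handle);
 *
 * Note that drm_rmmap() looks the map up by its kernel handle and only
 * removes maps flagged _DRM_REMOVABLE, which drm_addmap() permits for
 * _DRM_SHM alone.
 */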

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_t *map_ptr;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        err = drm_addmap( dev, map.offset, map.size, map.type, map.flags,
                          &map_ptr );

        if (err) {
                return err;
        }

        if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
                return -EFAULT;
        if (map_ptr->type != _DRM_SHM) {
                if (copy_to_user(&argp->handle, &map_ptr->offset,
                                 sizeof(map_ptr->offset)))
                        return -EFAULT;
        }
        return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param dev DRM device.
 * \param handle handle of the map to remove (drm_map_t::handle).
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks if it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap(drm_device_t *dev, void *handle)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        int found_maps = 0;

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty
         * and we didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                drm_dma_handle_t dmah;

                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                case _DRM_CONSISTENT:
                        dmah.vaddr = map->handle;
                        dmah.busaddr = map->offset;
                        dmah.size = map->size;
                        __drm_pci_free(dev, &dmah);
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
EXPORT_SYMBOL(drm_rmmap);

int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;

        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
                return -EFAULT;
        }

        return drm_rmmap(dev, request.handle);
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

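/*
 * Design note with a minimal sketch (illustrative): callers that fail
 * partway through building an entry set entry->buf_count (and, for PCI,
 * entry->seg_count) to the full request before calling this helper, so
 * that the loops above walk the whole zero-initialised array and free
 * exactly the elements that were actually allocated:
 *
 *        if (!buf->dev_private) {
 *                entry->buf_count = count;
 *                drm_cleanup_buf_error(dev, entry);
 *                ...
 *                return -ENOMEM;
 *        }
 *
 * The NULL checks on seglist[i] and dev_private make the over-count safe.
 */
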
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                               DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */

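/*
 * Worked example of the size bookkeeping shared by the drm_addbufs_*()
 * variants (illustrative numbers, assuming PAGE_SIZE == 4096 and
 * PAGE_SHIFT == 12):
 *
 *        request->size = 16384  =>  order = drm_order(16384) = 14
 *        size       = 1 << 14 = 16384
 *        alignment  = PAGE_ALIGN(16384) = 16384   (with _DRM_PAGE_ALIGN)
 *        page_order = 14 - 12 = 2                 (each buffer spans 4 pages)
 *        total      = PAGE_SIZE << 2 = 16384      (bytes per allocation)
 *
 * For request->size <= PAGE_SIZE, page_order clamps to 0 and every buffer
 * fits in a single page-sized allocation.
 */
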
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request->count, request->size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                drm_free( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                       DRM_MEM_BUFS );
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

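/*
 * Design note with a minimal sketch (illustrative): drm_addbufs_pci()
 * builds the enlarged pagelist in a temporary so that a failure midway
 * leaves dma->pagelist untouched; only after every allocation succeeds
 * is the old list freed and the pointer swapped:
 *
 *        new = drm_alloc(new_size, DRM_MEM_PAGES);
 *        if (!new)
 *                return -ENOMEM;           (old list still valid)
 *        memcpy(new, old, old_size);
 *        ... fill in new entries, bailing out on error ...
 *        drm_free(old, old_size, DRM_MEM_PAGES);
 *        dma->pagelist = new;
 */
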
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                               DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        up(&dev->struct_sem);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather,
 * framebuffer or consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                ret = drm_addbufs_sg(dev, &request);
        else if ( request.flags & _DRM_FB_BUFFER )
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}

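/*
 * Userspace sketch (illustrative only): requesting DMA buffers through
 * this ioctl.  Error handling is omitted and the size/count values are
 * hypothetical.
 *
 *        #include <string.h>
 *        #include <sys/ioctl.h>
 *        #include "drm.h"
 *
 *        drm_buf_desc_t desc;
 *        memset(&desc, 0, sizeof(desc));
 *        desc.count = 32;
 *        desc.size  = 16384;
 *        desc.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *        desc.agp_start = agp_buffer_offset;      (hypothetical)
 *        ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return, desc.count and desc.size hold the number and size of the
 * buffers actually allocated.
 */
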
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}

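/*
 * Userspace sketch (illustrative only) of the usual two-pass pattern for
 * this ioctl: first call with count = 0 to learn how many entries exist,
 * then call again with a list large enough to receive them.
 *
 *        drm_buf_info_t info;
 *        memset(&info, 0, sizeof(info));
 *        ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);        (info.count now set)
 *        info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *        ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);        (list filled in)
 */
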
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_free_buffer() for each buffer in the list.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), or the PCI buffer
 * region otherwise, and copies information about each buffer into user
 * space.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}

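/*
 * Userspace sketch (illustrative only): mapping all DMA buffers and
 * locating an individual buffer.  Error handling omitted;
 * expected_count (e.g. obtained via DRM_IOCTL_INFO_BUFS) is assumed.
 *
 *        drm_buf_map_t bufs;
 *        memset(&bufs, 0, sizeof(bufs));
 *        bufs.count = expected_count;
 *        bufs.list  = calloc(expected_count, sizeof(drm_buf_pub_t));
 *        ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs);
 *
 *        void *third_buf = bufs.list[2].address;
 *
 * The kernel fills in bufs.virtual with the base of the mapping and each
 * list entry's address with that base plus the buffer's offset.
 */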