/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

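/*
 * Look up the buffer object whose address space node covers the page
 * range [page_start, page_start + num_pages) in the device's red-black
 * tree of VM nodes. Returns NULL if no object fully covers the range.
 * Must be called with bdev->vm_lock held.
 */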
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

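/*
 * Fault handler installed through ttm_bo_vm_ops: reserve the object,
 * wait for a possible pipelined move to complete, adjust the caching
 * bits of vma->vm_page_prot to match the object's current placement,
 * and insert the faulting PFN, speculatively prefaulting up to
 * TTM_BO_VM_NUM_PREFAULT subsequent pages.
 */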
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        /*
         * Work around a locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        set_need_resched();
                        /* fall through */
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        spin_lock(&bdev->fence_lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bdev->fence_lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bdev->fence_lock);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers. There shouldn't really be any readers
         * of these bits except within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

                /* Allocate all pages at once, the most common usage */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on the
         * first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
                               PAGE_SHIFT) + page_offset;
                else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);
                /*
                 * Somebody beat us to this PTE, or we were prefaulting
                 * into an already populated PTE, or hit a prefaulting
                 * error.
                 */
                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_io_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

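/*
 * The open / close callbacks keep the buffer object reference stored in
 * vma->vm_private_data balanced across VMA duplication and teardown.
 */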
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};

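/*
 * Set up @vma to map a buffer object: look the object up by its device
 * address space offset, let the driver's verify_access() hook check
 * permissions, and install ttm_bo_vm_ops. The object reference taken
 * here is handed over to vma->vm_private_data and dropped again in
 * ttm_bo_vm_close().
 */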
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printk(KERN_ERR TTM_PFX
                       "Could not find buffer object to map.\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

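/*
 * Like ttm_bo_mmap(), but for a caller-supplied buffer object (e.g. a
 * fbdev framebuffer), so no address space lookup or verify_access()
 * check is performed. Only a zero page offset is accepted.
 */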
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

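/*
 * read/write helper: look up the buffer object containing *f_pos, kmap
 * the affected pages and copy up to @count bytes between the object and
 * user space. Returns the number of bytes transferred or a negative
 * error code.
 */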
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        /*
         * Compute the last page to map relative to the object start,
         * consistent with kmap_offset above, and use io_size so the
         * mapping cannot run past the end of the object.
         */
        kmap_end = ((*f_pos + io_size - 1) >> PAGE_SHIFT) - bo->vm_node->start;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        /* copy_{to,from}_user() returns the number of bytes not copied. */
        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

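/*
 * Same as ttm_bo_io(), but for a caller-supplied buffer object, with
 * *f_pos interpreted relative to the start of that object.
 */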
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        /*
         * Clamp the last page to map with io_size so kmap_num cannot
         * run past the end of the object.
         */
        kmap_end = (*f_pos + io_size - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        /*
         * copy_{to,from}_user() returns the number of bytes not copied;
         * do not return that positive count as a result.
         */
        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
}