/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in each drm_mm_node so we know which bo owns a
 * specific node. The pointer is not protected by any lock, so to keep
 * things sane you must only access it while holding the global LRU lock,
 * and you must reset it to NULL whenever you free a node.
 */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}
static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};
static struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};
static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}
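/*
 * Wait for the reserved flag on @bo to clear. The interruptible wait
 * returns -ERESTART if a signal is received; the non-interruptible wait
 * always returns 0 once the buffer is unreserved.
 */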
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	int ret;

	if (interruptible) {
		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
135 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
137 struct ttm_bo_device *bdev = bo->bdev;
138 struct ttm_mem_type_manager *man;
140 BUG_ON(!atomic_read(&bo->reserved));
142 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
144 BUG_ON(!list_empty(&bo->lru));
146 man = &bdev->man[bo->mem.mem_type];
147 list_add_tail(&bo->lru, &man->lru);
148 kref_get(&bo->list_kref);
150 if (bo->ttm != NULL) {
151 list_add_tail(&bo->swap, &bo->glob->swap_lru);
152 kref_get(&bo->list_kref);
/**
 * Call with the lru_lock held.
 */
161 static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
165 if (!list_empty(&bo->swap)) {
166 list_del_init(&bo->swap);
169 if (!list_empty(&bo->lru)) {
170 list_del_init(&bo->lru);
	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRUs here.
	 */
182 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
184 bool no_wait, bool use_sequence, uint32_t sequence)
186 struct ttm_bo_global *glob = bo->glob;
189 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
190 if (use_sequence && bo->seq_valid &&
191 (sequence - bo->val_seq < (1 << 31))) {
198 spin_unlock(&glob->lru_lock);
199 ret = ttm_bo_wait_unreserved(bo, interruptible);
200 spin_lock(&glob->lru_lock);
207 bo->val_seq = sequence;
208 bo->seq_valid = true;
210 bo->seq_valid = false;
215 EXPORT_SYMBOL(ttm_bo_reserve);
static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}
222 int ttm_bo_reserve(struct ttm_buffer_object *bo,
224 bool no_wait, bool use_sequence, uint32_t sequence)
226 struct ttm_bo_global *glob = bo->glob;
230 spin_lock(&glob->lru_lock);
231 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
233 if (likely(ret == 0))
234 put_count = ttm_bo_del_from_lru(bo);
235 spin_unlock(&glob->lru_lock);
238 kref_put(&bo->list_kref, ttm_bo_ref_bug);
243 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
245 struct ttm_bo_global *glob = bo->glob;
247 spin_lock(&glob->lru_lock);
248 ttm_bo_add_to_lru(bo);
249 atomic_set(&bo->reserved, 0);
250 wake_up_all(&bo->event_queue);
251 spin_unlock(&glob->lru_lock);
253 EXPORT_SYMBOL(ttm_bo_unreserve);
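/*
 * Typical driver-side usage of the reserve/unreserve pair above (an
 * illustrative sketch only; the validation and fencing steps are
 * driver-specific and not defined in this file):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... inspect or change bo->mem, queue GPU work, fence the bo ...
 *	ttm_bo_unreserve(bo);
 */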
/*
 * Call with bo->mutex held.
 */
258 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
260 struct ttm_bo_device *bdev = bo->bdev;
261 struct ttm_bo_global *glob = bo->glob;
263 uint32_t page_flags = 0;
265 TTM_ASSERT_LOCKED(&bo->mutex);
268 if (bdev->need_dma32)
269 page_flags |= TTM_PAGE_FLAG_DMA32;
272 case ttm_bo_type_device:
274 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
275 case ttm_bo_type_kernel:
276 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
277 page_flags, glob->dummy_read_page);
278 if (unlikely(bo->ttm == NULL))
281 case ttm_bo_type_user:
282 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
283 page_flags | TTM_PAGE_FLAG_USER,
284 glob->dummy_read_page);
285 if (unlikely(bo->ttm == NULL)) {
290 ret = ttm_tt_set_user(bo->ttm, current,
291 bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
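/*
 * Move the buffer to the location described by @mem: tear down CPU mappings
 * that would become stale, create and bind a ttm when the new memory type is
 * not FIXED, and then let the driver's move hook (or the generic ttm/memcpy
 * move helpers) perform the actual transfer.
 */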
304 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
305 struct ttm_mem_reg *mem,
306 bool evict, bool interruptible, bool no_wait)
308 struct ttm_bo_device *bdev = bo->bdev;
309 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
310 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
311 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
312 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
315 if (old_is_pci || new_is_pci ||
316 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
317 ttm_bo_unmap_virtual(bo);
	/*
	 * Create and bind a ttm if required.
	 */
323 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
324 ret = ttm_bo_add_ttm(bo, false);
328 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
332 if (mem->mem_type != TTM_PL_SYSTEM) {
333 ret = ttm_tt_bind(bo->ttm, mem);
338 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
346 if (bdev->driver->move_notify)
347 bdev->driver->move_notify(bo, mem);
349 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
350 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
351 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
352 else if (bdev->driver->move)
353 ret = bdev->driver->move(bo, evict, interruptible,
356 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
363 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
369 if (bo->mem.mm_node) {
370 spin_lock(&bo->lock);
371 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
372 bdev->man[bo->mem.mem_type].gpu_offset;
373 bo->cur_placement = bo->mem.placement;
374 spin_unlock(&bo->lock);
380 new_man = &bdev->man[bo->mem.mem_type];
381 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
382 ttm_tt_unbind(bo->ttm);
383 ttm_tt_destroy(bo->ttm);
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */
397 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
399 struct ttm_bo_device *bdev = bo->bdev;
400 struct ttm_bo_global *glob = bo->glob;
401 struct ttm_bo_driver *driver = bdev->driver;
404 spin_lock(&bo->lock);
405 (void) ttm_bo_wait(bo, false, false, !remove_all);
410 spin_unlock(&bo->lock);
412 spin_lock(&glob->lru_lock);
413 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
416 ttm_tt_unbind(bo->ttm);
418 if (!list_empty(&bo->ddestroy)) {
419 list_del_init(&bo->ddestroy);
420 kref_put(&bo->list_kref, ttm_bo_ref_bug);
422 if (bo->mem.mm_node) {
423 bo->mem.mm_node->private = NULL;
424 drm_mm_put_block(bo->mem.mm_node);
425 bo->mem.mm_node = NULL;
427 put_count = ttm_bo_del_from_lru(bo);
428 spin_unlock(&glob->lru_lock);
430 atomic_set(&bo->reserved, 0);
433 kref_put(&bo->list_kref, ttm_bo_release_list);
438 spin_lock(&glob->lru_lock);
439 if (list_empty(&bo->ddestroy)) {
440 void *sync_obj = bo->sync_obj;
441 void *sync_obj_arg = bo->sync_obj_arg;
443 kref_get(&bo->list_kref);
444 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
445 spin_unlock(&glob->lru_lock);
446 spin_unlock(&bo->lock);
449 driver->sync_obj_flush(sync_obj, sync_obj_arg);
450 schedule_delayed_work(&bdev->wq,
451 ((HZ / 100) < 1) ? 1 : HZ / 100);
455 spin_unlock(&glob->lru_lock);
456 spin_unlock(&bo->lock);
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
468 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
470 struct ttm_bo_global *glob = bdev->glob;
471 struct ttm_buffer_object *entry, *nentry;
472 struct list_head *list, *next;
475 spin_lock(&glob->lru_lock);
476 list_for_each_safe(list, next, &bdev->ddestroy) {
477 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		/**
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */
485 if (next != &bdev->ddestroy) {
486 nentry = list_entry(next, struct ttm_buffer_object,
488 kref_get(&nentry->list_kref);
490 kref_get(&entry->list_kref);
492 spin_unlock(&glob->lru_lock);
493 ret = ttm_bo_cleanup_refs(entry, remove_all);
494 kref_put(&entry->list_kref, ttm_bo_release_list);
496 spin_lock(&glob->lru_lock);
498 bool next_onlist = !list_empty(next);
499 spin_unlock(&glob->lru_lock);
500 kref_put(&nentry->list_kref, ttm_bo_release_list);
501 spin_lock(&glob->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return ret;
}
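/*
 * Delayed-destroy work item: it keeps rescheduling itself (roughly every
 * 10 ms) as long as ttm_bo_delayed_delete() reports that entries remain on
 * the delayed-destroy list.
 */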
520 static void ttm_bo_delayed_workqueue(struct work_struct *work)
522 struct ttm_bo_device *bdev =
523 container_of(work, struct ttm_bo_device, wq.work);
	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}
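/*
 * Called when the last kref on the buffer object is dropped: release the
 * address-space node, then run the delayed-destroy path to tear down the
 * remaining state once the buffer is idle.
 */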
531 static void ttm_bo_release(struct kref *kref)
533 struct ttm_buffer_object *bo =
534 container_of(kref, struct ttm_buffer_object, kref);
535 struct ttm_bo_device *bdev = bo->bdev;
537 if (likely(bo->vm_node != NULL)) {
538 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
539 drm_mm_put_block(bo->vm_node);
542 write_unlock(&bdev->vm_lock);
543 ttm_bo_cleanup_refs(bo, false);
544 kref_put(&bo->list_kref, ttm_bo_release_list);
545 write_lock(&bdev->vm_lock);
548 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
550 struct ttm_buffer_object *bo = *p_bo;
551 struct ttm_bo_device *bdev = bo->bdev;
554 write_lock(&bdev->vm_lock);
555 kref_put(&bo->kref, ttm_bo_release);
556 write_unlock(&bdev->vm_lock);
558 EXPORT_SYMBOL(ttm_bo_unref);
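/*
 * Evict @bo from its current placement: wait for the buffer to become idle,
 * ask the driver for an eviction placement via evict_flags(), find space
 * for it and move it there.
 */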
560 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
563 struct ttm_bo_device *bdev = bo->bdev;
564 struct ttm_bo_global *glob = bo->glob;
565 struct ttm_mem_reg evict_mem;
566 struct ttm_placement placement;
569 spin_lock(&bo->lock);
570 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
571 spin_unlock(&bo->lock);
573 if (unlikely(ret != 0)) {
574 if (ret != -ERESTART) {
575 printk(KERN_ERR TTM_PFX
576 "Failed to expire sync object before "
577 "buffer eviction.\n");
582 BUG_ON(!atomic_read(&bo->reserved));
585 evict_mem.mm_node = NULL;
587 bdev->driver->evict_flags(bo, &placement);
588 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
591 if (ret != -ERESTART)
592 printk(KERN_ERR TTM_PFX
593 "Failed to find memory space for "
594 "buffer 0x%p eviction.\n", bo);
598 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
601 if (ret != -ERESTART)
602 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
603 spin_lock(&glob->lru_lock);
604 if (evict_mem.mm_node) {
605 evict_mem.mm_node->private = NULL;
606 drm_mm_put_block(evict_mem.mm_node);
607 evict_mem.mm_node = NULL;
609 spin_unlock(&glob->lru_lock);
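/*
 * Evict the first (least recently used) buffer object on the LRU list of
 * memory type @mem_type, reserving it first and dropping the LRU references
 * it held.
 */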
617 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
619 bool interruptible, bool no_wait)
621 struct ttm_bo_global *glob = bdev->glob;
622 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
623 struct ttm_buffer_object *bo;
624 int ret, put_count = 0;
626 spin_lock(&glob->lru_lock);
627 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
628 kref_get(&bo->list_kref);
629 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
630 if (likely(ret == 0))
631 put_count = ttm_bo_del_from_lru(bo);
632 spin_unlock(&glob->lru_lock);
633 if (unlikely(ret != 0))
636 kref_put(&bo->list_kref, ttm_bo_ref_bug);
637 ret = ttm_bo_evict(bo, interruptible, no_wait);
638 ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);

	return ret;
}
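/*
 * Allocate a drm_mm node for @mem from manager @man. The search and the
 * atomic get are done under the global lru_lock; drm_mm_pre_get() is called
 * outside the lock so the atomic get cannot fail for lack of memory, and the
 * loop retries if another thread grabs the node we found.
 */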
643 static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
644 struct ttm_mem_type_manager *man,
645 struct ttm_placement *placement,
646 struct ttm_mem_reg *mem,
647 struct drm_mm_node **node)
649 struct ttm_bo_global *glob = bo->glob;
653 lpfn = placement->lpfn;
658 ret = drm_mm_pre_get(&man->manager);
662 spin_lock(&glob->lru_lock);
663 *node = drm_mm_search_free_in_range(&man->manager,
664 mem->num_pages, mem->page_alignment,
665 placement->fpfn, lpfn, 1);
666 if (unlikely(*node == NULL)) {
667 spin_unlock(&glob->lru_lock);
670 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
674 spin_unlock(&glob->lru_lock);
675 } while (*node == NULL);
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
683 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
685 struct ttm_placement *placement,
686 struct ttm_mem_reg *mem,
687 bool interruptible, bool no_wait)
689 struct ttm_bo_device *bdev = bo->bdev;
690 struct ttm_bo_global *glob = bdev->glob;
691 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
692 struct drm_mm_node *node;
696 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
697 if (unlikely(ret != 0))
701 spin_lock(&glob->lru_lock);
702 if (list_empty(&man->lru)) {
703 spin_unlock(&glob->lru_lock);
706 spin_unlock(&glob->lru_lock);
707 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
709 if (unlikely(ret != 0))
715 mem->mem_type = mem_type;
719 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
720 uint32_t cur_placement,
721 uint32_t proposed_placement)
723 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
724 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
	/**
	 * Keep current caching if possible.
	 */
730 if ((cur_placement & caching) != 0)
731 result |= (cur_placement & caching);
732 else if ((man->default_caching & caching) != 0)
733 result |= man->default_caching;
734 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
735 result |= TTM_PL_FLAG_CACHED;
736 else if ((TTM_PL_FLAG_WC & caching) != 0)
737 result |= TTM_PL_FLAG_WC;
738 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
739 result |= TTM_PL_FLAG_UNCACHED;
744 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
747 uint32_t proposed_placement,
748 uint32_t *masked_placement)
750 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
752 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
755 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
758 if ((proposed_placement & man->available_caching) == 0)
761 cur_flags |= (proposed_placement & man->available_caching);
763 *masked_placement = cur_flags;
767 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
771 for (i = 0; i <= TTM_PL_PRIV5; i++)
772 if (flags & (1 << i)) {
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
787 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
788 struct ttm_placement *placement,
789 struct ttm_mem_reg *mem,
790 bool interruptible, bool no_wait)
792 struct ttm_bo_device *bdev = bo->bdev;
793 struct ttm_mem_type_manager *man;
794 uint32_t mem_type = TTM_PL_SYSTEM;
795 uint32_t cur_flags = 0;
796 bool type_found = false;
797 bool type_ok = false;
798 bool has_eagain = false;
799 struct drm_mm_node *node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
804 ret = ttm_mem_type_from_flags(placement->placement[i],
808 man = &bdev->man[mem_type];
810 type_ok = ttm_bo_mt_compatible(man,
811 bo->type == ttm_bo_type_user,
813 placement->placement[i],
819 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the proposed placement flags into the current flags.
		 */
825 ttm_flag_masked(&cur_flags, placement->placement[i],
826 ~TTM_PL_MASK_MEMTYPE);
828 if (mem_type == TTM_PL_SYSTEM)
831 if (man->has_type && man->use_type) {
833 ret = ttm_bo_man_get_node(bo, man, placement, mem,
842 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
844 mem->mem_type = mem_type;
845 mem->placement = cur_flags;
	for (i = 0; i < placement->num_busy_placement; ++i) {
855 ret = ttm_mem_type_from_flags(placement->placement[i],
859 man = &bdev->man[mem_type];
862 if (!ttm_bo_mt_compatible(man,
863 bo->type == ttm_bo_type_user,
865 placement->placement[i],
869 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the proposed placement flags into the current flags.
		 */
875 ttm_flag_masked(&cur_flags, placement->placement[i],
876 ~TTM_PL_MASK_MEMTYPE);
878 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
879 interruptible, no_wait);
880 if (ret == 0 && mem->mm_node) {
881 mem->placement = cur_flags;
882 mem->mm_node->private = bo;
885 if (ret == -ERESTART)
888 ret = (has_eagain) ? -ERESTART : -ENOMEM;
891 EXPORT_SYMBOL(ttm_bo_mem_space);
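/*
 * Wait for all synccpu write grabs on @bo to be released. Returns -EBUSY
 * if @no_wait is set and CPU writers are still active, and -ERESTART if the
 * wait is interrupted by a signal.
 */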
893 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
897 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
900 ret = wait_event_interruptible(bo->event_queue,
901 atomic_read(&bo->cpu_writers) == 0);
903 if (ret == -ERESTARTSYS)
909 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
910 struct ttm_placement *placement,
911 bool interruptible, bool no_wait)
913 struct ttm_bo_global *glob = bo->glob;
915 struct ttm_mem_reg mem;
917 BUG_ON(!atomic_read(&bo->reserved));
	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
924 spin_lock(&bo->lock);
925 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
926 spin_unlock(&bo->lock);
929 mem.num_pages = bo->num_pages;
930 mem.size = mem.num_pages << PAGE_SHIFT;
931 mem.page_alignment = bo->mem.page_alignment;
	/*
	 * Determine where to move the buffer.
	 */
935 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
938 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
940 if (ret && mem.mm_node) {
941 spin_lock(&glob->lru_lock);
942 mem.mm_node->private = NULL;
943 drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
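/*
 * Check whether the proposed placement list already matches the buffer's
 * current memory region (same memory type and compatible caching), so that
 * no move is required. Returns the index of the first matching placement,
 * or a negative value if none matches.
 */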
949 static int ttm_bo_mem_compat(struct ttm_placement *placement,
950 struct ttm_mem_reg *mem)
954 for (i = 0; i < placement->num_placement; i++) {
955 if ((placement->placement[i] & mem->placement &
956 TTM_PL_MASK_CACHING) &&
957 (placement->placement[i] & mem->placement &
964 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
965 struct ttm_placement *placement,
966 bool interruptible, bool no_wait)
970 BUG_ON(!atomic_read(&bo->reserved));
971 /* Check that range is valid */
972 if (placement->lpfn || placement->fpfn)
973 if (placement->fpfn > placement->lpfn ||
974 (placement->lpfn - placement->fpfn) < bo->num_pages)
	/*
	 * Check whether we need to move the buffer.
	 */
979 ret = ttm_bo_mem_compat(placement, &bo->mem);
981 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
		/*
		 * Copy the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags into the
		 * active flags.
		 */
989 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
990 ~TTM_PL_MASK_MEMTYPE);
	/*
	 * We might need to add a TTM.
	 */
995 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
996 ret = ttm_bo_add_ttm(bo, true);
1002 EXPORT_SYMBOL(ttm_buffer_object_validate);
static int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
		       uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}

	return 0;
}
1035 int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1036 struct ttm_buffer_object *bo,
1038 enum ttm_bo_type type,
1040 uint32_t page_alignment,
1041 unsigned long buffer_start,
1043 struct file *persistant_swap_storage,
1045 void (*destroy) (struct ttm_buffer_object *))
1048 unsigned long num_pages;
1049 uint32_t placements[8];
1050 struct ttm_placement placement;
1052 size += buffer_start & ~PAGE_MASK;
1053 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1054 if (num_pages == 0) {
1055 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1058 bo->destroy = destroy;
1060 spin_lock_init(&bo->lock);
1061 kref_init(&bo->kref);
1062 kref_init(&bo->list_kref);
1063 atomic_set(&bo->cpu_writers, 0);
1064 atomic_set(&bo->reserved, 1);
1065 init_waitqueue_head(&bo->event_queue);
1066 INIT_LIST_HEAD(&bo->lru);
1067 INIT_LIST_HEAD(&bo->ddestroy);
1068 INIT_LIST_HEAD(&bo->swap);
1070 bo->glob = bdev->glob;
1072 bo->num_pages = num_pages;
1073 bo->mem.mem_type = TTM_PL_SYSTEM;
1074 bo->mem.num_pages = bo->num_pages;
1075 bo->mem.mm_node = NULL;
1076 bo->mem.page_alignment = page_alignment;
1077 bo->buffer_start = buffer_start & PAGE_MASK;
1079 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1080 bo->seq_valid = false;
1081 bo->persistant_swap_storage = persistant_swap_storage;
1082 bo->acc_size = acc_size;
1083 atomic_inc(&bo->glob->bo_count);
1085 ret = ttm_bo_check_placement(bo, flags, 0ULL);
1086 if (unlikely(ret != 0))
	/*
	 * If no caching attributes are set, accept any form of caching.
	 */
1093 if ((flags & TTM_PL_MASK_CACHING) == 0)
1094 flags |= TTM_PL_MASK_CACHING;
	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
1101 if (bo->type == ttm_bo_type_device) {
1102 ret = ttm_bo_setup_vm(bo);
1109 for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
1110 if (flags & (1 << i))
1111 placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
1112 placement.placement = placements;
1113 placement.num_placement = c;
1114 placement.busy_placement = placements;
1115 placement.num_busy_placement = c;
1116 ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
1120 ttm_bo_unreserve(bo);
1124 ttm_bo_unreserve(bo);
1129 EXPORT_SYMBOL(ttm_buffer_object_init);
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}
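/*
 * Convenience wrapper that accounts the object against the memory global,
 * allocates the struct and calls ttm_buffer_object_init(). A typical call
 * from a driver might look like the sketch below (the flag combination and
 * arguments are only an example, not taken from this file):
 *
 *	struct ttm_buffer_object *bo;
 *	ret = ttm_buffer_object_create(bdev, size, ttm_bo_type_device,
 *				       TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 */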
1140 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1142 enum ttm_bo_type type,
1144 uint32_t page_alignment,
1145 unsigned long buffer_start,
1147 struct file *persistant_swap_storage,
1148 struct ttm_buffer_object **p_bo)
1150 struct ttm_buffer_object *bo;
1151 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1155 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1156 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1157 if (unlikely(ret != 0))
1160 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1162 if (unlikely(bo == NULL)) {
1163 ttm_mem_global_free(mem_glob, acc_size);
1167 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1168 page_alignment, buffer_start,
1170 persistant_swap_storage, acc_size, NULL);
1171 if (likely(ret == 0))
1177 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1178 unsigned mem_type, bool allow_errors)
1180 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1181 struct ttm_bo_global *glob = bdev->glob;
	/*
	 * Can't use standard list traversal since we're unlocking.
	 */
1188 spin_lock(&glob->lru_lock);
1189 while (!list_empty(&man->lru)) {
1190 spin_unlock(&glob->lru_lock);
1191 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1196 printk(KERN_ERR TTM_PFX
1197 "Cleanup eviction failed\n");
1200 spin_lock(&glob->lru_lock);
1202 spin_unlock(&glob->lru_lock);
1206 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1208 struct ttm_bo_global *glob = bdev->glob;
1209 struct ttm_mem_type_manager *man;
1212 if (mem_type >= TTM_NUM_MEM_TYPES) {
1213 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1216 man = &bdev->man[mem_type];
1218 if (!man->has_type) {
1219 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1220 "memory manager type %u\n", mem_type);
1224 man->use_type = false;
1225 man->has_type = false;
1229 ttm_bo_force_list_clean(bdev, mem_type, false);
1231 spin_lock(&glob->lru_lock);
1232 if (drm_mm_clean(&man->manager))
1233 drm_mm_takedown(&man->manager);
1237 spin_unlock(&glob->lru_lock);
1242 EXPORT_SYMBOL(ttm_bo_clean_mm);
1244 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1246 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1248 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1249 printk(KERN_ERR TTM_PFX
1250 "Illegal memory manager memory type %u.\n",
1255 if (!man->has_type) {
1256 printk(KERN_ERR TTM_PFX
1257 "Memory type %u has not been initialized.\n",
1262 return ttm_bo_force_list_clean(bdev, mem_type, true);
1264 EXPORT_SYMBOL(ttm_bo_evict_mm);
1266 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1267 unsigned long p_size)
1270 struct ttm_mem_type_manager *man;
1272 if (type >= TTM_NUM_MEM_TYPES) {
1273 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1277 man = &bdev->man[type];
1278 if (man->has_type) {
1279 printk(KERN_ERR TTM_PFX
1280 "Memory manager already initialized for type %d\n",
1285 ret = bdev->driver->init_mem_type(bdev, type, man);
1290 if (type != TTM_PL_SYSTEM) {
1292 printk(KERN_ERR TTM_PFX
1293 "Zero size memory manager type %d\n",
1297 ret = drm_mm_init(&man->manager, 0, p_size);
1301 man->has_type = true;
1302 man->use_type = true;
1305 INIT_LIST_HEAD(&man->lru);
1309 EXPORT_SYMBOL(ttm_bo_init_mm);
1311 static void ttm_bo_global_kobj_release(struct kobject *kobj)
1313 struct ttm_bo_global *glob =
1314 container_of(kobj, struct ttm_bo_global, kobj);
1316 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1317 __free_page(glob->dummy_read_page);
1321 void ttm_bo_global_release(struct ttm_global_reference *ref)
1323 struct ttm_bo_global *glob = ref->object;
1325 kobject_del(&glob->kobj);
1326 kobject_put(&glob->kobj);
1328 EXPORT_SYMBOL(ttm_bo_global_release);
1330 int ttm_bo_global_init(struct ttm_global_reference *ref)
1332 struct ttm_bo_global_ref *bo_ref =
1333 container_of(ref, struct ttm_bo_global_ref, ref);
1334 struct ttm_bo_global *glob = ref->object;
1337 mutex_init(&glob->device_list_mutex);
1338 spin_lock_init(&glob->lru_lock);
1339 glob->mem_glob = bo_ref->mem_glob;
1340 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1342 if (unlikely(glob->dummy_read_page == NULL)) {
1347 INIT_LIST_HEAD(&glob->swap_lru);
1348 INIT_LIST_HEAD(&glob->device_list);
1350 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1351 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1352 if (unlikely(ret != 0)) {
1353 printk(KERN_ERR TTM_PFX
1354 "Could not register buffer object swapout.\n");
1358 glob->ttm_bo_extra_size =
1359 ttm_round_pot(sizeof(struct ttm_tt)) +
1360 ttm_round_pot(sizeof(struct ttm_backend));
1362 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1363 ttm_round_pot(sizeof(struct ttm_buffer_object));
1365 atomic_set(&glob->bo_count, 0);
1367 kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
1368 ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
1369 if (unlikely(ret != 0))
1370 kobject_put(&glob->kobj);
1373 __free_page(glob->dummy_read_page);
1378 EXPORT_SYMBOL(ttm_bo_global_init);
1381 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1384 unsigned i = TTM_NUM_MEM_TYPES;
1385 struct ttm_mem_type_manager *man;
1386 struct ttm_bo_global *glob = bdev->glob;
1389 man = &bdev->man[i];
1390 if (man->has_type) {
1391 man->use_type = false;
1392 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1394 printk(KERN_ERR TTM_PFX
1395 "DRM memory manager type %d "
1396 "is not clean.\n", i);
1398 man->has_type = false;
1402 mutex_lock(&glob->device_list_mutex);
1403 list_del(&bdev->device_list);
1404 mutex_unlock(&glob->device_list_mutex);
1406 if (!cancel_delayed_work(&bdev->wq))
1407 flush_scheduled_work();
1409 while (ttm_bo_delayed_delete(bdev, true))
1412 spin_lock(&glob->lru_lock);
1413 if (list_empty(&bdev->ddestroy))
1414 TTM_DEBUG("Delayed destroy list was clean\n");
1416 if (list_empty(&bdev->man[0].lru))
1417 TTM_DEBUG("Swap list was clean\n");
1418 spin_unlock(&glob->lru_lock);
1420 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1421 write_lock(&bdev->vm_lock);
1422 drm_mm_takedown(&bdev->addr_space_mm);
1423 write_unlock(&bdev->vm_lock);
1427 EXPORT_SYMBOL(ttm_bo_device_release);
1429 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1430 struct ttm_bo_global *glob,
1431 struct ttm_bo_driver *driver,
1432 uint64_t file_page_offset,
1437 rwlock_init(&bdev->vm_lock);
1438 bdev->driver = driver;
1440 memset(bdev->man, 0, sizeof(bdev->man));
	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
1446 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1447 if (unlikely(ret != 0))
1450 bdev->addr_space_rb = RB_ROOT;
1451 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1452 if (unlikely(ret != 0))
1453 goto out_no_addr_mm;
1455 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1456 bdev->nice_mode = true;
1457 INIT_LIST_HEAD(&bdev->ddestroy);
1458 bdev->dev_mapping = NULL;
1460 bdev->need_dma32 = need_dma32;
1462 mutex_lock(&glob->device_list_mutex);
1463 list_add_tail(&bdev->device_list, &glob->device_list);
1464 mutex_unlock(&glob->device_list_mutex);
1468 ttm_bo_clean_mm(bdev, 0);
1472 EXPORT_SYMBOL(ttm_bo_device_init);
/*
 * buffer object vm functions.
 */
1478 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1480 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1482 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1483 if (mem->mem_type == TTM_PL_SYSTEM)
1486 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1489 if (mem->placement & TTM_PL_FLAG_CACHED)
1495 int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1496 struct ttm_mem_reg *mem,
1497 unsigned long *bus_base,
1498 unsigned long *bus_offset, unsigned long *bus_size)
1500 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1503 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1506 if (ttm_mem_reg_is_pci(bdev, mem)) {
1507 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1508 *bus_size = mem->num_pages << PAGE_SHIFT;
1509 *bus_base = man->io_offset;
1515 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1517 struct ttm_bo_device *bdev = bo->bdev;
1518 loff_t offset = (loff_t) bo->addr_space_offset;
1519 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1521 if (!bdev->dev_mapping)
1524 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1526 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
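/*
 * Insert @bo into the device's address-space red-black tree, keyed by the
 * start offset of its vm_node, so that the object backing a given mmap
 * offset can be looked up efficiently.
 */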
1528 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1530 struct ttm_bo_device *bdev = bo->bdev;
1531 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1532 struct rb_node *parent = NULL;
1533 struct ttm_buffer_object *cur_bo;
1534 unsigned long offset = bo->vm_node->start;
1535 unsigned long cur_offset;
1539 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1540 cur_offset = cur_bo->vm_node->start;
1541 if (offset < cur_offset)
1542 cur = &parent->rb_left;
1543 else if (offset > cur_offset)
1544 cur = &parent->rb_right;
1549 rb_link_node(&bo->vm_rb, parent, cur);
1550 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */
1564 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1566 struct ttm_bo_device *bdev = bo->bdev;
1570 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1571 if (unlikely(ret != 0))
1574 write_lock(&bdev->vm_lock);
1575 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1576 bo->mem.num_pages, 0, 0);
1578 if (unlikely(bo->vm_node == NULL)) {
1583 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1584 bo->mem.num_pages, 0);
1586 if (unlikely(bo->vm_node == NULL)) {
1587 write_unlock(&bdev->vm_lock);
1591 ttm_bo_vm_insert_rb(bo);
1592 write_unlock(&bdev->vm_lock);
1593 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1597 write_unlock(&bdev->vm_lock);
1601 int ttm_bo_wait(struct ttm_buffer_object *bo,
1602 bool lazy, bool interruptible, bool no_wait)
1604 struct ttm_bo_driver *driver = bo->bdev->driver;
1609 if (likely(bo->sync_obj == NULL))
1612 while (bo->sync_obj) {
1614 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1615 void *tmp_obj = bo->sync_obj;
1616 bo->sync_obj = NULL;
1617 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1618 spin_unlock(&bo->lock);
1619 driver->sync_obj_unref(&tmp_obj);
1620 spin_lock(&bo->lock);
1627 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1628 sync_obj_arg = bo->sync_obj_arg;
1629 spin_unlock(&bo->lock);
1630 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1631 lazy, interruptible);
1632 if (unlikely(ret != 0)) {
1633 driver->sync_obj_unref(&sync_obj);
1634 spin_lock(&bo->lock);
1637 spin_lock(&bo->lock);
1638 if (likely(bo->sync_obj == sync_obj &&
1639 bo->sync_obj_arg == sync_obj_arg)) {
1640 void *tmp_obj = bo->sync_obj;
1641 bo->sync_obj = NULL;
1642 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1644 spin_unlock(&bo->lock);
1645 driver->sync_obj_unref(&sync_obj);
1646 driver->sync_obj_unref(&tmp_obj);
1647 spin_lock(&bo->lock);
1649 spin_unlock(&bo->lock);
1650 driver->sync_obj_unref(&sync_obj);
1651 spin_lock(&bo->lock);
1656 EXPORT_SYMBOL(ttm_bo_wait);
1658 void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1660 atomic_set(&bo->reserved, 0);
1661 wake_up_all(&bo->event_queue);
1664 int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1669 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1672 else if (interruptible) {
1673 ret = wait_event_interruptible
1674 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1675 if (unlikely(ret != 0))
1678 wait_event(bo->event_queue,
1679 atomic_read(&bo->reserved) == 0);
1685 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */
1694 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1695 if (unlikely(ret != 0))
1697 spin_lock(&bo->lock);
1698 ret = ttm_bo_wait(bo, false, true, no_wait);
1699 spin_unlock(&bo->lock);
1700 if (likely(ret == 0))
1701 atomic_inc(&bo->cpu_writers);
1702 ttm_bo_unreserve(bo);
1706 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1708 if (atomic_dec_and_test(&bo->cpu_writers))
1709 wake_up_all(&bo->event_queue);
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
1717 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1719 struct ttm_bo_global *glob =
1720 container_of(shrink, struct ttm_bo_global, shrink);
1721 struct ttm_buffer_object *bo;
1724 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1726 spin_lock(&glob->lru_lock);
1727 while (ret == -EBUSY) {
1728 if (unlikely(list_empty(&glob->swap_lru))) {
1729 spin_unlock(&glob->lru_lock);
1733 bo = list_first_entry(&glob->swap_lru,
1734 struct ttm_buffer_object, swap);
1735 kref_get(&bo->list_kref);
		/**
		 * Reserve the buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap list while
		 * we slept.
		 */
1743 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1744 if (unlikely(ret == -EBUSY)) {
1745 spin_unlock(&glob->lru_lock);
1746 ttm_bo_wait_unreserved(bo, false);
1747 kref_put(&bo->list_kref, ttm_bo_release_list);
1748 spin_lock(&glob->lru_lock);
1753 put_count = ttm_bo_del_from_lru(bo);
1754 spin_unlock(&glob->lru_lock);
1757 kref_put(&bo->list_kref, ttm_bo_ref_bug);
	/**
	 * Wait for GPU, then move to system cached.
	 */
1763 spin_lock(&bo->lock);
1764 ret = ttm_bo_wait(bo, false, false, false);
1765 spin_unlock(&bo->lock);
1767 if (unlikely(ret != 0))
1770 if ((bo->mem.placement & swap_placement) != swap_placement) {
1771 struct ttm_mem_reg evict_mem;
1773 evict_mem = bo->mem;
1774 evict_mem.mm_node = NULL;
1775 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1776 evict_mem.mem_type = TTM_PL_SYSTEM;
1778 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1780 if (unlikely(ret != 0))
1784 ttm_bo_unmap_virtual(bo);
	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
1791 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped-out buffer.
	 */
1800 atomic_set(&bo->reserved, 0);
1801 wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
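/*
 * Swap out buffer objects until the shrink callback reports that there is
 * nothing left to swap.
 */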
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);