/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "cirrus_drv.h"
#include <ttm/ttm_page_alloc.h>

static inline struct cirrus_device *
cirrus_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct cirrus_device, ttm.bdev);
}

static int
cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

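/*
 * TTM shares one memory-accounting object and one buffer-object (BO)
 * global across all drivers; take a reference on each here.
 * drm_global_item_ref() instantiates an item on its first reference.
 */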
static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &cirrus->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &cirrus_ttm_mem_global_init;
	global_ref->release = &cirrus_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	cirrus->ttm.bo_global_ref.mem_glob =
		cirrus->ttm.mem_global_ref.object;
	global_ref = &cirrus->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&cirrus->ttm.mem_global_ref);
		return r;
	}
	return 0;
}

static void
cirrus_ttm_global_release(struct cirrus_device *cirrus)
{
	if (cirrus->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
	drm_global_item_unref(&cirrus->ttm.mem_global_ref);
	cirrus->ttm.mem_global_ref.release = NULL;
}

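/*
 * Final-release callback invoked by TTM when the last reference to a BO
 * is dropped: release the embedded GEM object and free the wrapper.
 */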
static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct cirrus_bo *bo = container_of(tbo, struct cirrus_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &cirrus_bo_ttm_destroy;
}

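/*
 * Describe each memory pool to TTM: system RAM allows any caching mode,
 * while VRAM is a fixed, CPU-mappable PCI aperture that defaults to
 * write-combined mappings.
 */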
static int
cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct cirrus_bo *cirrusbo = cirrus_bo(bo);

	if (!cirrus_ttm_bo_is_cirrus_bo(bo))
		return;

	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
	*pl = cirrusbo->placement;
}

static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct cirrus_bo *cirrusbo = cirrus_bo(bo);

	return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
}

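/*
 * Resolve a memory region to bus addresses for CPU access: system RAM
 * needs no setup, while VRAM maps through PCI BAR 0 as I/O memory.
 */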
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct cirrus_device *cirrus = cirrus_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory: nothing to reserve */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int cirrus_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	/* No DMA engine: all moves are CPU memcpy. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}

static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func cirrus_tt_backend_func = {
	.destroy = &cirrus_ttm_backend_destroy,
};

static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &cirrus_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

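/* TTM driver vtable wiring the callbacks above into the BO subsystem. */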
struct ttm_bo_driver cirrus_bo_driver = {
	.ttm_tt_create = cirrus_ttm_tt_create,
	.ttm_tt_populate = cirrus_ttm_tt_populate,
	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
	.init_mem_type = cirrus_bo_init_mem_type,
	.evict_flags = cirrus_bo_evict_flags,
	.move = cirrus_bo_move,
	.verify_access = cirrus_bo_verify_access,
	.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
	.io_mem_free = &cirrus_ttm_io_mem_free,
};

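/*
 * One-time memory-manager setup: global TTM state, the BO device, a VRAM
 * range manager sized to the detected VRAM, and write-combining for the
 * framebuffer aperture via MTRR/PAT.
 */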
int cirrus_mm_init(struct cirrus_device *cirrus)
{
	int ret;
	struct drm_device *dev = cirrus->dev;
	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;

	ret = cirrus_ttm_global_init(cirrus);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
				 cirrus->ttm.bo_global_ref.ref.object,
				 &cirrus_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     cirrus->mc.vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					   pci_resource_len(dev->pdev, 0));

	cirrus->mm_inited = true;
	return 0;
}

void cirrus_mm_fini(struct cirrus_device *cirrus)
{
	if (!cirrus->mm_inited)
		return;

	ttm_bo_device_release(&cirrus->ttm.bdev);

	cirrus_ttm_global_release(cirrus);

	arch_phys_wc_del(cirrus->fb_mtrr);
	cirrus->fb_mtrr = 0;
}

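/*
 * Build the TTM placement list for a BO from a domain mask, falling back
 * to system memory if no domain bit is set.
 */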
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
{
	u32 c = 0;

	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}

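/*
 * Allocate a cirrus BO: a GEM object wrapped around a TTM buffer object,
 * placeable in either VRAM or system memory.
 */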
int cirrus_bo_create(struct drm_device *dev, int size, int align,
		     uint32_t flags, struct cirrus_bo **pcirrusbo)
{
	struct cirrus_device *cirrus = dev->dev_private;
	struct cirrus_bo *cirrusbo;
	size_t acc_size;
	int ret;

	cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
	if (!cirrusbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
	if (ret) {
		kfree(cirrusbo);
		return ret;
	}

	cirrusbo->bo.bdev = &cirrus->ttm.bdev;

	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
				       sizeof(struct cirrus_bo));

	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
			  ttm_bo_type_device, &cirrusbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, cirrus_bo_ttm_destroy);
	if (ret)
		return ret;

	*pcirrusbo = cirrusbo;
	return 0;
}

static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
{
	return bo->bo.offset;
}

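/*
 * Pin a BO into the requested domain, preventing eviction; pins nest
 * via pin_count.
 */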
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = cirrus_bo_gpu_offset(bo);
		return 0;
	}

	cirrus_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = cirrus_bo_gpu_offset(bo);
	return 0;
}

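/*
 * Drop a pin and, when the last pin goes away, unmap any kernel mapping
 * and migrate the BO back to system memory.
 */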
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to SYSRAM failed\n");
		return ret;
	}
	return 0;
}

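/*
 * fops->mmap: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM maps;
 * everything above is handled by TTM.
 */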
int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct cirrus_device *cirrus;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	cirrus = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}