/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"
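
/*
 * GEM glue for radeon: every radeon_bo (a TTM buffer object) embeds its
 * drm_gem_object as gem_base, and gem_to_radeon_bo() converts back.  The
 * ioctls below create, map, wait on, tile and (on Cayman and newer)
 * assign GPU virtual addresses to those objects.
 */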

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* radeon allocates all BOs through radeon_gem_object_create(),
	 * so this GEM init hook must never be reached */
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which runs in both the create
 * and open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va, *tmp;

	if (rdev->family < CHIP_CAYMAN) {
		/* pre-Cayman parts have no per-process GPU VM */
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		return;
	}
	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			/* remove from this vm address space */
			mutex_lock(&vm->mutex);
			list_del(&bo_va->vm_list);
			mutex_unlock(&vm->mutex);
			list_del(&bo_va->bo_list);
			kfree(bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
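
/*
 * GEM ioctls
 */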

int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	int i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}
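
/*
 * pread/pwrite are not wired up for radeon; both stubs just log the
 * call and fail.  BO contents are reached by mmap()ing the object
 * (see radeon_gem_mmap_ioctl() below) instead.
 */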
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
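
/*
 * Translate a GEM handle into the fake mmap offset userspace passes to
 * mmap() on the DRM fd; shared by the dumb-buffer path and
 * radeon_gem_mmap_ioctl().
 */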
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
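
/*
 * Non-blocking busy query: radeon_bo_wait() with no_wait set returns
 * -EBUSY while the BO is still referenced by the GPU, and the current
 * TTM placement is translated back into a GEM domain for userspace.
 */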
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
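
/*
 * Tiling flags and pitch live on the radeon_bo; these two ioctls just
 * forward to the radeon_bo tiling helpers.
 */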
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
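
/*
 * GPU virtual address handling (Cayman and newer): validate the
 * requested offset, flags and operation, then map or unmap the BO in
 * this file's per-process address space.
 */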
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value. Moving
	 * forward we can then use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			/* BO is already mapped in this VM, report the
			 * existing virtual address instead */
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
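
/*
 * KMS dumb-buffer hooks: allocate a page-aligned VRAM BO sized from
 * width/height/bpp and return a GEM handle for it.
 */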
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	/* note: ttm_bo_type_device evaluates to false for the "kernel" argument */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}