static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+ return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
-int __must_check
+struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
u64 size,
u64 alignment,
u64 flags);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
- return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
-
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
-/* Some GGTT VM helpers */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
}
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
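+/*
+ * Find the vma, if any, that the object occupies in the global GTT for the
+ * given view; a NULL view is treated as the normal GGTT view.
+ */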
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
{
- return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+ return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
}
-unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
-static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
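+/*
+ * Shorthand for the GGTT offset of a bound object; note the vma returned by
+ * i915_gem_object_to_ggtt() is dereferenced without a NULL check, so the
+ * caller must already know the object is bound for this view.
+ */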
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+ return i915_gem_object_to_ggtt(o, view)->node.start;
}
/* i915_gem_fence.c */
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma;
struct drm_mm_node node;
char __user *user_data;
uint64_t remain;
uint64_t offset;
int ret;
- ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
- if (ret) {
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
if (ret)
goto out;
i915_gem_object_pin_pages(obj);
} else {
- node.start = i915_gem_obj_ggtt_offset(obj);
+ node.start = vma->node.start;
node.allocated = false;
ret = i915_gem_object_put_fence(obj);
if (ret)
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
- i915_gem_object_ggtt_unpin(obj);
+ i915_vma_unpin(vma);
}
out:
return ret;
{
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_device *dev = obj->base.dev;
+ struct i915_vma *vma;
struct drm_mm_node node;
uint64_t remain, offset;
char __user *user_data;
if (i915_gem_object_is_tiled(obj))
return -EFAULT;
- ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_NONBLOCK);
- if (ret) {
+ if (IS_ERR(vma)) {
ret = insert_mappable_node(i915, &node, PAGE_SIZE);
if (ret)
goto out;
i915_gem_object_pin_pages(obj);
} else {
- node.start = i915_gem_obj_ggtt_offset(obj);
+ node.start = vma->node.start;
node.allocated = false;
ret = i915_gem_object_put_fence(obj);
if (ret)
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
- i915_gem_object_ggtt_unpin(obj);
+ i915_vma_unpin(vma);
}
out:
return ret;
/**
* i915_gem_fault - fault a page into the GTT
- * @vma: VMA in question
+ * @area: CPU VMA in question
* @vmf: fault info
*
* The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
* suffer if the GTT working set is large or there are few fence registers
* left.
*/
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
- struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_ggtt_view view = i915_ggtt_view_normal;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+ struct i915_vma *vma;
pgoff_t page_offset;
unsigned long pfn;
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
view.params.partial.size =
min_t(unsigned int,
chunk_size,
- (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+ (area->vm_end - area->vm_start) / PAGE_SIZE -
view.params.partial.offset);
}
/* Now pin it into the GTT if needed */
- ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
- if (ret)
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto err_unlock;
+ }
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto err_unpin;
/* Finally, remap it using the new GTT offset */
- pfn = ggtt->mappable_base +
- i915_gem_obj_ggtt_offset_view(obj, &view);
+ pfn = ggtt->mappable_base + vma->node.start;
pfn >>= PAGE_SHIFT;
if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
* is due to userspace losing part of the mapping or never
* having accessed it before (at this partial's range).
*/
- unsigned long base = vma->vm_start +
+ unsigned long base = area->vm_start +
(view.params.partial.offset << PAGE_SHIFT);
unsigned int i;
for (i = 0; i < view.params.partial.size; i++) {
- ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
+ ret = vm_insert_pfn(area,
+ base + i * PAGE_SIZE,
+ pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
} else {
if (!obj->fault_mappable) {
- unsigned long size = min_t(unsigned long,
- vma->vm_end - vma->vm_start,
- obj->base.size);
+ unsigned long size =
+ min_t(unsigned long,
+ area->vm_end - area->vm_start,
+ obj->base.size) >> PAGE_SHIFT;
+ unsigned long base = area->vm_start;
int i;
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- ret = vm_insert_pfn(vma,
- (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ for (i = 0; i < size; i++) {
+ ret = vm_insert_pfn(area,
+ base + i * PAGE_SIZE,
pfn + i);
if (ret)
break;
obj->fault_mappable = true;
} else
- ret = vm_insert_pfn(vma,
+ ret = vm_insert_pfn(area,
(unsigned long)vmf->virtual_address,
pfn + page_offset);
}
err_unpin:
- i915_gem_object_ggtt_unpin_view(obj, &view);
+ __i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
old_write_domain);
/* And bump the LRU for this access */
- vma = i915_gem_obj_to_ggtt(obj);
+ vma = i915_gem_object_to_ggtt(obj, NULL);
if (vma &&
drm_mm_node_allocated(&vma->node) &&
!i915_vma_is_active(vma))
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
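+ *
+ * Returns the pinned GGTT vma for @view on success, or an ERR_PTR-encoded
+ * error on failure.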
*/
-int
+struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view)
{
+ struct i915_vma *vma;
u32 old_read_domains, old_write_domain;
int ret;
*/
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
- if (ret)
+ if (ret) {
+ vma = ERR_PTR(ret);
goto err_unpin_display;
+ }
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
view->type == I915_GGTT_VIEW_NORMAL ?
PIN_MAPPABLE : 0);
- if (ret)
+ if (IS_ERR(vma))
goto err_unpin_display;
+ WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
+
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
old_read_domains,
old_write_domain);
- return 0;
+ return vma;
err_unpin_display:
obj->pin_display--;
- return ret;
+ return vma;
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- if (WARN_ON(obj->pin_display == 0))
+ if (WARN_ON(vma->obj->pin_display == 0))
return;
- i915_gem_object_ggtt_unpin_view(obj, view);
- obj->pin_display--;
+ vma->obj->pin_display--;
+ i915_vma_unpin(vma);
+ WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
/**
return ret;
}
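+/*
+ * Pin an object into the global GTT for the given view, creating the vma if
+ * necessary. A NULL view means the normal GGTT view. Returns the pinned vma
+ * on success or an ERR_PTR on failure.
+ */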
-int
+struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
u64 size,
u64 alignment,
u64 flags)
{
+ struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
struct i915_vma *vma;
int ret;
- if (!view)
- view = &i915_ggtt_view_normal;
-
- vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
if (flags & PIN_NONBLOCK &&
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
WARN(i915_vma_is_pinned(vma),
"bo is already pinned in ggtt with incorrect alignment:"
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
- return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
-}
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ if (ret)
+ return ERR_PTR(ret);
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
+ return vma;
}
static __always_inline unsigned int __busy_read_flag(unsigned int id)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
- vma->vm == vm)
- return vma;
- }
- return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(!view);
-
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (i915_vma_is_ggtt(vma) &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
- return vma;
- return NULL;
-}
-
int i915_gem_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
}
}
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_vma *vma;
-
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
- return vma->node.start;
- }
-
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link)
- if (i915_vma_is_ggtt(vma) &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
- return vma->node.start;
-
- WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
- return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
- return true;
- }
-
- return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link)
- if (i915_vma_is_ggtt(vma) &&
- i915_ggtt_view_equal(&vma->ggtt_view, view) &&
- drm_mm_node_allocated(&vma->node))
- return true;
-
- return false;
-}
-
-unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(list_empty(&o->vma_list));
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma->node.size;
- }
-
- return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (i915_vma_is_pinned(vma))
- return true;
-
- return false;
-}
-
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
- if (IS_ERR(vma)) {
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+ if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
goto err;
struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma;
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
int ret;
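+ /*
+ * Pin the object into the mappable GGTT aperture first so we have a
+ * vma (and hence a stable GGTT offset) to relocate through; the exit
+ * paths below drop the pin again via the unpin label.
+ */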
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
- return ret;
+ goto unpin;
ret = i915_gem_object_put_fence(obj);
if (ret)
- return ret;
+ goto unpin;
/* Map the page containing the relocation we're going to perform. */
- offset = i915_gem_obj_ggtt_offset(obj);
- offset += reloc->offset;
+ offset = vma->node.start + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
offset += sizeof(uint32_t);
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
- return 0;
+unpin:
+ i915_vma_unpin(vma);
+ return ret;
}
static void
return 0;
}
-static struct i915_vma*
+static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct drm_i915_gem_object *batch_obj,
batch_start_offset,
batch_len,
is_master);
- if (ret)
- goto err;
-
- ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
- if (ret)
- goto err;
+ if (ret) {
+ if (ret == -EACCES) /* unhandled chained batch */
+ vma = NULL;
+ else
+ vma = ERR_PTR(ret);
+ goto out;
+ }
- i915_gem_object_unpin_pages(shadow_batch_obj);
+ vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ goto out;
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
- vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
i915_gem_object_get(shadow_batch_obj);
list_add_tail(&vma->exec_list, &eb->vmas);
- return vma;
-
-err:
+out:
i915_gem_object_unpin_pages(shadow_batch_obj);
- if (ret == -EACCES) /* unhandled chained batch */
- return NULL;
- else
- return ERR_PTR(ret);
+ return vma;
}
static int
* hsw should have this fixed, but bdw mucks it up again. */
if (dispatch_flags & I915_DISPATCH_SECURE) {
struct drm_i915_gem_object *obj = params->batch->obj;
+ struct i915_vma *vma;
/*
* So on first glance it looks freaky that we pin the batch here
* fitting due to fragmentation.
* So this is actually safe.
*/
- ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (ret)
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto err;
+ }
- params->batch = i915_gem_obj_to_ggtt(obj);
+ params->batch = vma;
}
/* Allocate a request for this batch buffer nice and early. */
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- params->request->batch_obj = params->batch->obj;
+ params->request->batch = params->batch;
ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
POSTING_READ(fence_reg_lo);
if (obj) {
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
unsigned int tiling = i915_gem_object_get_tiling(obj);
unsigned int stride = i915_gem_object_get_stride(obj);
u32 size = vma->node.size;
u32 val;
if (obj) {
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
unsigned int tiling = i915_gem_object_get_tiling(obj);
unsigned int stride = i915_gem_object_get_stride(obj);
int pitch_val;
u32 val;
if (obj) {
- struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+ struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
unsigned int tiling = i915_gem_object_get_tiling(obj);
unsigned int stride = i915_gem_object_get_stride(obj);
u32 pitch_val;
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
- WARN_ON(!ggtt_vma ||
- dev_priv->fence_regs[obj->fence_reg].pin_count >
- i915_vma_pin_count(ggtt_vma));
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ to_i915(obj->base.dev)->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
static struct i915_vma *
-__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
int i;
GEM_BUG_ON(vm->closed);
- if (WARN_ON(i915_is_ggtt(vm) != !!view))
- return ERR_PTR(-EINVAL);
-
vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
vma->obj = obj;
vma->size = obj->base.size;
- if (i915_is_ggtt(vm)) {
- vma->flags |= I915_VMA_GGTT;
+ if (view) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
vma->size = view->params.partial.size;
intel_rotation_info_size(&view->params.rotated);
vma->size <<= PAGE_SHIFT;
}
+ }
+
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
} else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
}
list_add_tail(&vma->obj_link, &obj->vma_list);
-
return vma;
}
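+/*
+ * A vma matches a (vm, view) lookup when it belongs to the same address
+ * space and, for GGTT vmas, carries the same view: a NULL view stands for
+ * the normal GGTT view, otherwise both the view type and its parameters
+ * must compare equal.
+ */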
+static inline bool vma_matches(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ if (vma->vm != vm)
+ return false;
+
+ if (!i915_vma_is_ggtt(vma))
+ return true;
+
+ if (!view)
+ return vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL;
+
+ if (vma->ggtt_view.type != view->type)
+ return false;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params)) == 0;
+}
+
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(view ? i915_gem_obj_to_ggtt_view(obj, view) : i915_gem_obj_to_vma(obj, vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
- return __i915_gem_vma_create(obj, vm, view ?: &i915_ggtt_view_normal);
+ return __i915_vma_create(obj, vm, view);
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = __i915_gem_vma_create(obj, vm,
- i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+ list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
+ if (vma_matches(vma, vm, view))
+ return vma;
- return vma;
+ return NULL;
}
struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+ struct i915_vma *vma;
- GEM_BUG_ON(!view);
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, &ggtt->base, view);
+ vma = __i915_vma_create(obj, vm, view);
GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
-
}
static struct scatterlist *
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
- const struct i915_ggtt_view *b)
-{
- if (WARN_ON(!a || !b))
- return false;
-
- if (a->type != b->type)
- return false;
- if (a->type != I915_GGTT_VIEW_NORMAL)
- return !memcmp(&a->params, &b->params, sizeof(a->params));
- return true;
-}
-
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
#define PIN_MAPPABLE BIT(1)
/* No zalloc, must clear what we need by hand */
req->previous_context = NULL;
req->file_priv = NULL;
- req->batch_obj = NULL;
+ req->batch = NULL;
req->pid = NULL;
req->elsp_submitted = 0;
/** Batch buffer related to this request if any (used for
* error state dump only).
*/
- struct drm_i915_gem_object *batch_obj;
+ struct i915_vma *batch;
struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
if (INTEL_GEN(dev_priv) >= 4)
return 0;
- vma = i915_gem_obj_to_ggtt(obj);
+ vma = i915_gem_object_to_ggtt(obj, NULL);
if (!vma)
return 0;
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src,
- struct i915_address_space *vm)
+ struct i915_vma *vma)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_gem_object *src;
struct drm_i915_error_object *dst;
- struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
u64 reloc_offset;
- if (src == NULL || src->pages == NULL)
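+ /*
+ * The capture is now keyed on the vma rather than on (object, vm); a
+ * NULL vma simply means there is nothing to record for this slot.
+ */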
+ if (!vma)
+ return NULL;
+
+ src = vma->obj;
+ if (!src->pages)
return NULL;
num_pages = src->base.size >> PAGE_SHIFT;
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
- if (dst == NULL)
+ if (!dst)
return NULL;
- if (i915_gem_obj_bound(src, vm))
- dst->gtt_offset = i915_gem_obj_offset(src, vm);
- else
- dst->gtt_offset = -1;
-
- reloc_offset = dst->gtt_offset;
- if (i915_is_ggtt(vm))
- vma = i915_gem_obj_to_ggtt(src);
+ reloc_offset = dst->gtt_offset = vma->node.start;
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
+ (vma->flags & I915_VMA_GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
goto unwind;
- reloc_offset = i915_gem_obj_ggtt_offset(src);
+ reloc_offset = vma->node.start;
if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
}
kfree(dst);
return NULL;
}
-#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
/* The error capture is special as it tries to run underneath the normal
* locking rules - so we use the raw version of the i915_gem_active lookup.
int i, count;
error->semaphore =
- i915_error_ggtt_object_create(dev_priv,
- dev_priv->semaphore->obj);
+ i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
*/
ee->batchbuffer =
i915_error_object_create(dev_priv,
- request->batch_obj,
- ee->vm);
+ request->batch);
if (HAS_BROKEN_CS_TLB(dev_priv))
ee->wa_batchbuffer =
- i915_error_ggtt_object_create(dev_priv,
- engine->scratch->obj);
+ i915_error_object_create(dev_priv,
+ engine->scratch);
- if (request->ctx->engine[i].state) {
- ee->ctx = i915_error_ggtt_object_create(dev_priv,
- request->ctx->engine[i].state->obj);
- }
+ ee->ctx =
+ i915_error_object_create(dev_priv,
+ request->ctx->engine[i].state);
if (request->pid) {
struct task_struct *task;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
- i915_error_ggtt_object_create(dev_priv,
- ring->vma->obj);
+ i915_error_object_create(dev_priv, ring->vma);
}
ee->hws_page =
- i915_error_ggtt_object_create(dev_priv,
- engine->status_page.vma->obj);
+ i915_error_object_create(dev_priv,
+ engine->status_page.vma);
- ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
- engine->wa_ctx.vma->obj);
+ ee->wa_ctx =
+ i915_error_object_create(dev_priv, engine->wa_ctx.vma);
count = 0;
list_for_each_entry(request, &engine->request_list, link)
}
}
-int
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation)
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
+ struct i915_vma *vma;
u32 alignment;
int ret;
*/
intel_runtime_pm_get(dev_priv);
- ret = i915_gem_object_pin_to_display_plane(obj, alignment,
- &view);
- if (ret)
+ vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto err_pm;
+ }
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
}
intel_runtime_pm_put(dev_priv);
- return 0;
+ return vma;
err_unpin:
- i915_gem_object_unpin_from_display_plane(obj, &view);
+ i915_gem_object_unpin_from_display_plane(vma);
err_pm:
intel_runtime_pm_put(dev_priv);
- return ret;
+ return ERR_PTR(ret);
}
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
+ struct i915_vma *vma;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj, &view);
+ vma = i915_gem_object_to_ggtt(obj, &view);
+ i915_gem_object_unpin_from_display_plane(vma);
}
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
continue;
obj = intel_fb_obj(fb);
- if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
drm_framebuffer_reference(fb);
goto valid_fb;
}
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
POSTING_READ(reg);
}
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
+ struct i915_vma *vma;
u64 offset;
intel_fill_fb_ggtt_view(&view, fb, rotation);
- offset = i915_gem_obj_ggtt_offset_view(obj, &view);
+ vma = i915_gem_object_to_ggtt(obj, &view);
+ if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
+ view.type))
+ return -1;
+
+ offset = vma->node.start;
WARN_ON(upper_32_bits(offset));
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request;
+ struct i915_vma *vma;
int ret;
/*
mmio_flip = use_mmio_flip(engine, obj);
- ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
- if (ret)
+ vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto cleanup_pending;
+ }
work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
work->gtt_offset += intel_crtc->dspaddr_offset;
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ if (IS_ERR(vma))
+ ret = PTR_ERR(vma);
}
if (ret == 0) {
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
- addr = i915_gem_obj_ggtt_offset(obj);
+ addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
- int ret;
intel_init_gt_powersave(dev_priv);
* for this.
*/
for_each_crtc(dev, c) {
+ struct i915_vma *vma;
+
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+ vma = intel_pin_and_fence_fb_obj(c->primary->fb,
c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
- if (ret) {
+ if (IS_ERR(vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
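+ /* GGTT vma of the pinned framebuffer */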
+ struct i915_vma *vma;
async_cookie_t cookie;
int preferred_bpp;
};
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
if (IS_GEN(dev_priv, 5, 6))
- cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+ cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
cache->fb.fence_reg = obj->fence_reg;
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
bool prealloc = false;
void __iomem *vaddr;
int ret;
sizes->fb_height = intel_fb->base.height;
}
- obj = intel_fb->obj;
-
mutex_lock(&dev->struct_mutex);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
- if (ret)
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out_unlock;
+ }
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
- vma = i915_gem_obj_to_ggtt(obj);
-
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (ifbdev->fb->obj->stolen && !prealloc)
+ if (intel_fb->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
- fb->width, fb->height,
- i915_gem_obj_ggtt_offset(obj), obj);
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx\n",
+ fb->width, fb->height, vma->node.start);
+ ifbdev->vma = vma;
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
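+ *
+ * The firmware object is passed in as an already-pinned GGTT vma; its node
+ * offset supplies the source address programmed into the DMA registers.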
*/
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
+ struct i915_vma *vma)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset;
- struct sg_table *sg = fw_obj->pages;
+ struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0;
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
+ offset = vma->node.start + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_device *dev = &dev_priv->drm;
+ struct i915_vma *vma;
int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
return ret;
}
- ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("pin failed %d\n", ret);
- return ret;
+ vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
}
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
set_guc_init_params(dev_priv);
- ret = guc_ucode_xfer_dma(dev_priv);
+ ret = guc_ucode_xfer_dma(dev_priv, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
- i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+ i915_vma_unpin(vma);
return ret;
}
i915_gem_track_fb(vma->obj, NULL,
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
- i915_gem_object_unpin_from_display_plane(vma->obj, &i915_ggtt_view_normal);
+ i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
if (WARN_ON(!vma))
return;
- i915_gem_object_unpin_from_display_plane(vma->obj, &i915_ggtt_view_normal);
+ i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
overlay->crtc->overlay = NULL;
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
+ vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
&i915_ggtt_view_normal);
- if (ret != 0)
- return ret;
-
- vma = i915_gem_obj_to_ggtt_view(new_bo, &i915_ggtt_view_normal);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
ret = i915_gem_object_put_fence(new_bo);
if (ret)
swidth = params->src_w;
swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y);
+ iowrite32(vma->node.start + params->offset_Y, ®s->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, ®s->OBUF_0U);
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, ®s->OBUF_0V);
+ iowrite32(vma->node.start + params->offset_U, ®s->OBUF_0U);
+ iowrite32(vma->node.start + params->offset_V, ®s->OBUF_0V);
ostride |= params->stride_UV << 16;
}
return 0;
out_unpin:
- i915_gem_object_ggtt_unpin(new_bo);
+ i915_gem_object_unpin_from_display_plane(vma);
return ret;
}
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
+ struct i915_vma *vma = NULL;
int ret;
if (!HAS_OVERLAY(dev_priv))
}
overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
- ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
+ vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
0, PAGE_SIZE, PIN_MAPPABLE);
- if (ret) {
+ if (IS_ERR(vma)) {
DRM_ERROR("failed to pin overlay register bo\n");
+ ret = PTR_ERR(vma);
goto out_free_bo;
}
- overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+ overlay->flip_addr = vma->node.start;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
return;
out_unpin_bo:
- if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- i915_gem_object_ggtt_unpin(reg_bo);
+ if (vma)
+ i915_vma_unpin(vma);
out_free_bo:
i915_gem_object_put(reg_bo);
out_free: