#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
return -EINVAL;
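+ /* Domain tracking only covers our own GPU activity, so an object
+ * already in the CPU read domain may still be written by a third
+ * party through a shared dma-buf; wait for all rendering before
+ * inspecting the domain below.
+ */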
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
}
ret = i915_gem_object_get_pages(obj);
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
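+ /* As for reads, wait for all rendering (ours and any external
+ * dma-buf access) to finish before trusting the write-domain check
+ * below.
+ */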
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
+
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
/* If we're not in the cpu write domain, set ourself into the gtt
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
needs_clflush_after = cpu_write_needs_clflush(obj);
- ret = i915_gem_object_wait_rendering(obj, false);
- if (ret)
- return ret;
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
+ struct reservation_object *resv;
int ret, i;
- if (!obj->active)
- return 0;
-
if (readonly) {
if (obj->last_write_req != NULL) {
ret = i915_wait_request(obj->last_write_req);
if (ret)
return ret;
GEM_BUG_ON(obj->active);
}
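+ /* obj->active only tracks requests issued by this driver; an object
+ * shared through dma-buf may also carry fences from other devices in
+ * its reservation object. Wait on those too: writers (!readonly)
+ * must wait for all fences, readers only for the exclusive (write)
+ * fence.
+ */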
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv) {
+ long err;
+
+ err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+ MAX_SCHEDULE_TIMEOUT);
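+ /* Only propagate real errors: a positive return is the remaining
+ * timeout, and 0 (timed out) is not expected with
+ * MAX_SCHEDULE_TIMEOUT.
+ */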
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
struct i915_vma *vma;
int ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
- return 0;
-
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
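+ /* Check the domain only after waiting: an early return here must not
+ * skip the wait above, as even an object already in the GTT domain
+ * may have outstanding external (dma-buf) rendering.
+ */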
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
uint32_t old_write_domain, old_read_domains;
int ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
- return 0;
-
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
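+ /* As for the GTT domain, the early return must not short-circuit
+ * the wait for external rendering above.
+ */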
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
int ret;
vma->pin_count = 0;
- ret = i915_vma_unbind(vma);
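+ /* i915_vma_unbind() calls wait_rendering(), which would now also
+ * block on external dma-buf fences; those need not hold up freeing
+ * the object, so use the no-wait variant.
+ */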
+ ret = __i915_vma_unbind_no_wait(vma);
if (WARN_ON(ret == -ERESTARTSYS)) {
bool was_interruptible;