return;
if (--vma->obj->pin_display == 0)
- vma->display_alignment = 4096;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
if (!i915_vma_is_active(vma))
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
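Both context alignments stay whole multiples of the new GTT minimum, so the symbolic rename cannot change the values the allocator ends up seeing. A throwaway stand-alone check (plain C; the local constants merely mirror GEN6_CONTEXT_ALIGN and I915_GTT_MIN_ALIGNMENT, they are not the kernel defines):

#include <assert.h>

int main(void)
{
	unsigned long gen6_ctx_align = 64UL << 10;	/* mirrors GEN6_CONTEXT_ALIGN */
	unsigned long gtt_min_align = 4096UL;		/* mirrors I915_GTT_MIN_ALIGNMENT */

	/* 64KiB is exactly 16 GTT pages, so both alignments are page multiples */
	assert(gen6_ctx_align % gtt_min_align == 0);
	assert(gen6_ctx_align / gtt_min_align == 16);
	return 0;
}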
if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
- ctx->ggtt_offset_bias = 4096;
+ ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
return ctx;
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size =
- round_up(get_context_size(dev_priv), 4096);
+ round_up(get_context_size(dev_priv),
+ I915_GTT_PAGE_SIZE);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
if (check_color) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start > target->vm->start)
- start -= 4096;
+ start -= I915_GTT_PAGE_SIZE;
if (end < target->vm->start + target->vm->total)
- end += 4096;
+ end += I915_GTT_PAGE_SIZE;
}
drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
memset(&cache->node, 0, sizeof(cache->node));
ret = drm_mm_insert_node_in_range_generic
(&ggtt->base.mm, &cache->node,
- 4096, 0, I915_COLOR_UNEVICTABLE,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
- if (entry->alignment &&
- vma->node.start & (entry->alignment - 1))
+ if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
if (vma->node.size < entry->pad_to_size)
unsigned int stride = i915_gem_object_get_stride(vma->obj);
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- GEM_BUG_ON(vma->node.start & 4095);
- GEM_BUG_ON(vma->fence_size & 4095);
- GEM_BUG_ON(stride & 127);
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(stride, 128));
- val = (vma->node.start + vma->fence_size - 4096) << 32;
+ val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
val |= vma->node.start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
- GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
stride /= 128;
GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!is_power_of_2(stride / 128));
- GEM_BUG_ON(vma->node.start & (vma->fence_size - 1));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
val = vma->node.start;
if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
struct drm_i915_private;
struct i915_vma;
+#define I965_FENCE_PAGE 4096UL
+
struct drm_i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
return -ENOMEM;
p->daddr = dma_map_page(kdev,
- p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, p->daddr)) {
__free_page(p->page);
if (WARN_ON(!p->page))
return;
- dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
u64 *end)
{
if (node->color != color)
- *start += 4096;
+ *start += I915_GTT_PAGE_SIZE;
node = list_next_entry(node, node_list);
if (node->allocated && node->color != color)
- *end -= 4096;
+ *end -= I915_GTT_PAGE_SIZE;
}
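The colour-adjust hook above is what creates the guard pages that the earlier eviction hunk widens its search range by. A toy model of the same arithmetic (stand-alone C, not the drm_mm API; adjust_hole and its prev/next inputs are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE 4096ULL	/* local stand-in for I915_GTT_PAGE_SIZE */

/* shrink a free hole by one GTT page at each end that abuts a node of a
 * different cache colour, mirroring the hook above */
static void adjust_hole(uint64_t *start, uint64_t *end,
			int prev_differs, int next_differs)
{
	if (prev_differs)
		*start += GTT_PAGE;
	if (next_differs)
		*end -= GTT_PAGE;
}

int main(void)
{
	uint64_t start = 0x10000, end = 0x20000;

	adjust_hole(&start, &end, 1, 1);
	printf("usable hole: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}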
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ggtt->error_capture,
- 4096, 0,
+ PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
0, 0);
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
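Every conversion in this patch leans on the alignment being a power of two, in which case "x & (a - 1)" and "!IS_ALIGNED(x, a)" are the same test. A stand-alone sketch (plain C; GTT_PAGE_SIZE and ALIGNED are local stand-ins for the kernel macros, not the defines above):

#include <assert.h>
#include <stdint.h>

#define GTT_PAGE_SIZE 4096ULL
#define ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t offsets[] = { 0, 4096, 8192, 4095, 6000, 1 << 20 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		int open_coded = (offsets[i] & (GTT_PAGE_SIZE - 1)) == 0;

		/* the macro and the open-coded mask must always agree */
		assert(ALIGNED(offsets[i], GTT_PAGE_SIZE) == open_coded);
	}
	return 0;
}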
#define PIN_HIGH BIT(9)
#define PIN_OFFSET_BIAS BIT(10)
#define PIN_OFFSET_FIXED BIT(11)
-#define PIN_OFFSET_MASK (~4095)
+#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
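The negated form works because, in two's complement, -N equals ~(N - 1) for any power-of-two N, so the new mask clears exactly the same low bits as the old ~4095. A quick stand-alone check (assumes unsigned long arithmetic, as in the define above):

#include <assert.h>

int main(void)
{
	unsigned long page = 4096UL;	/* local stand-in for I915_GTT_PAGE_SIZE */

	/* -page flips every bit above the page offset, exactly like ~4095 */
	assert(-page == ~(page - 1));
	return 0;
}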
if (!rodata)
return 0;
- if (rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
- WARN_ON(stolen_offset & 4095))
+ if (WARN_ON(size == 0) ||
+ WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (INTEL_GEN(i915) >= 4) {
stride *= i915_gem_tile_height(tiling);
- GEM_BUG_ON(stride & 4095);
+ GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
return roundup(size, stride);
}
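With made-up numbers, the arithmetic of the hunk above: a stride that is a multiple of the 512-byte X-tile width, times the 8-row tile height, always lands on a page multiple, which is what the new GEM_BUG_ON asserts before the size is rounded up. Stand-alone sketch (ROUNDUP is a local stand-in for the kernel's roundup(); the stride and tile height are assumed X-tile values):

#include <assert.h>
#include <stdint.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint64_t size = 1000 * 4096ULL;	/* made-up object size */
	uint64_t stride = 512;		/* made-up X-tile stride in bytes */

	stride *= 8;			/* X-tile height, as in the hunk above */

	/* 512B x 8 rows = 4096B, so the rounded fence size stays page aligned */
	assert((stride & 4095) == 0);
	assert((ROUNDUP(size, stride) & 4095) == 0);
	return 0;
}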
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_GEN(i915) >= 4 || tiling == I915_TILING_NONE)
- return 4096;
+ if (tiling == I915_TILING_NONE)
+ return I915_GTT_MIN_ALIGNMENT;
+
+ if (INTEL_GEN(i915) >= 4)
+ return I965_FENCE_PAGE;
/*
* Previous chips need to be aligned to the size of the smallest
else
tile_width = 512;
- if (stride & (tile_width - 1))
+ if (!IS_ALIGNED(stride, tile_width))
return false;
/* 965+ just needs multiples of tile width */
return false;
alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
- if (vma->node.start & (alignment - 1))
+ if (!IS_ALIGNED(vma->node.start, alignment))
return false;
return true;
vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
- vma->display_alignment = 4096;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
if (view) {
vma->ggtt_view = *view;
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
i915_gem_object_get_stride(obj));
- GEM_BUG_ON(vma->fence_size & 4095);
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
i915_gem_object_get_tiling(obj),
if (vma->node.size < size)
return true;
- if (alignment && vma->node.start & (alignment - 1))
+ GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+ if (alignment && !IS_ALIGNED(vma->node.start, alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return;
fenceable = (vma->node.size >= vma->fence_size &&
- (vma->node.start & (vma->fence_alignment - 1)) == 0);
+ IS_ALIGNED(vma->node.start, vma->fence_alignment));
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
alignment, vma->fence_alignment);
}
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(!is_power_of_2(alignment));
+
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+ end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+ GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
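For reference, the BIAS path a few lines up can only ever produce a page-aligned start, because masking with a negated page size clears every bit below the page. Stand-alone sketch (the flag value and the local GTT_PAGE/OFFSET_MASK names are made up, not the real PIN_* bits):

#include <assert.h>
#include <stdint.h>

#define GTT_PAGE 4096ULL
#define OFFSET_MASK (-GTT_PAGE)	/* same shape as PIN_OFFSET_MASK */

int main(void)
{
	/* hypothetical caller-supplied bias with some low flag bits set */
	uint64_t flags = 0x00ff5000ULL | 0x7;
	uint64_t start = flags & OFFSET_MASK;

	assert((start & (GTT_PAGE - 1)) == 0);	/* page aligned */
	assert(start == 0x00ff5000ULL);		/* flag bits stripped */
	return 0;
}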
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) ||
+ if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_unpin;
* with zero alignment, so where possible use the optimal
* path.
*/
- if (alignment <= 4096)
+ if (alignment <= I915_GTT_MIN_ALIGNMENT)
alignment = 0;
search_free:
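Dropping the requested alignment to zero is safe because every node start and size in the GTT is already a page multiple (the new GEM_BUG_ONs above enforce it), so every hole boundary is page aligned as well. A toy reasoning check (stand-alone C, not the drm_mm allocator; the sizes are made up):

#include <assert.h>
#include <stdint.h>

#define GTT_PAGE 4096ULL

int main(void)
{
	/* hypothetical page-aligned allocations packed back to back */
	uint64_t sizes[] = { 8192, 4096, 65536, 12288 };
	uint64_t hole_start = 0;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		hole_start += sizes[i];
		/* every boundary between nodes is already page aligned */
		assert((hole_start & (GTT_PAGE - 1)) == 0);
	}
	return 0;
}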
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
WARN_ON(ce->state);
- context_size = round_up(intel_lr_context_size(engine), 4096);
+ context_size = round_up(intel_lr_context_size(engine),
+ I915_GTT_PAGE_SIZE);
/* One extra page for the shared data between the driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
#include "intel_ringbuffer.h"
-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
void *vaddr;
int ret;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, 4096);
+ engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- ret = intel_ring_pin(ring, 4096);
+ ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
if (ret) {
intel_ring_free(ring);
goto error;
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
struct i915_vma *vma;
- obj = i915_gem_object_create(dev_priv, 4096);
+ obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(obj))
goto err;
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {