static inline gtt_pte_t pte_encode(struct drm_device *dev,
                                   dma_addr_t addr,
-                                  gtt_pte_t cache_bits)
+                                  enum i915_cache_level level)
{
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
-       pte |= cache_bits;
+
+       switch (level) {
+       case I915_CACHE_LLC_MLC:
+               /* Haswell doesn't set L3 this way */
+               if (IS_HASWELL(dev))
+                       pte |= GEN6_PTE_CACHE_LLC;
+               else
+                       pte |= GEN6_PTE_CACHE_LLC_MLC;
+               break;
+       case I915_CACHE_LLC:
+               pte |= GEN6_PTE_CACHE_LLC;
+               break;
+       case I915_CACHE_NONE:
+               if (IS_HASWELL(dev))
+                       pte |= HSW_PTE_UNCACHED;
+               else
+                       pte |= GEN6_PTE_UNCACHED;
+               break;
+       default:
+               BUG();
+       }
+
        return pte;
}
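
Illustration (not part of the patch): a minimal, self-contained sketch of the
level-to-bits mapping that pte_encode() now centralizes. The bit values, the
address mask, and the is_haswell flag below are placeholders chosen for the
example, not the driver's real GEN6/HSW definitions.

/* Illustrative only: placeholder PTE bit values, not the i915 definitions. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef uint32_t gtt_pte_t;

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_LLC_MLC };

#define PTE_VALID             (1u << 0)                  /* placeholder */
#define PTE_UNCACHED          (1u << 1)                  /* placeholder */
#define HSW_UNCACHED          (0u)                       /* placeholder */
#define PTE_CACHE_LLC         (2u << 1)                  /* placeholder */
#define PTE_CACHE_LLC_MLC     (3u << 1)                  /* placeholder */
#define PTE_ADDR_ENCODE(addr) ((gtt_pte_t)((addr) & ~0xfffull)) /* placeholder */

static gtt_pte_t sketch_pte_encode(int is_haswell, uint64_t addr,
                                   enum cache_level level)
{
        gtt_pte_t pte = PTE_VALID | PTE_ADDR_ENCODE(addr);

        switch (level) {
        case CACHE_LLC_MLC:
                /* mirrors the patch's Haswell special case */
                pte |= is_haswell ? PTE_CACHE_LLC : PTE_CACHE_LLC_MLC;
                break;
        case CACHE_LLC:
                pte |= PTE_CACHE_LLC;
                break;
        case CACHE_NONE:
                pte |= is_haswell ? HSW_UNCACHED : PTE_UNCACHED;
                break;
        default:
                assert(0);
        }
        return pte;
}

int main(void)
{
        uint64_t addr = 0x12345000ull;

        printf("snb uncached: %#x\n", sketch_pte_encode(0, addr, CACHE_NONE));
        printf("hsw uncached: %#x\n", sketch_pte_encode(1, addr, CACHE_NONE));
        printf("snb llc/mlc:  %#x\n", sketch_pte_encode(0, addr, CACHE_LLC_MLC));
        return 0;
}

Running the sketch shows the same cache level producing different bits per
platform, which is the decision this patch moves out of the individual callers
and into pte_encode().
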
        unsigned last_pte, i;
        scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
-                                GEN6_PTE_CACHE_LLC);
+                                I915_CACHE_LLC);
        while (num_entries) {
                last_pte = first_pte + num_entries;
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
                                          const struct sg_table *pages,
                                          unsigned first_entry,
-                                         gtt_pte_t pte_flags)
+                                         enum i915_cache_level cache_level)
{
        gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
-                                                pte_flags);
+                                                cache_level);
                        /* grab the next page */
                        if (++m == segment_len) {
                             struct drm_i915_gem_object *obj,
                             enum i915_cache_level cache_level)
{
-       gtt_pte_t pte_flags = GEN6_PTE_VALID;
-
-       switch (cache_level) {
-       case I915_CACHE_LLC_MLC:
-               /* Haswell doesn't set L3 this way */
-               if (IS_HASWELL(ppgtt->dev))
-                       pte_flags |= GEN6_PTE_CACHE_LLC;
-               else
-                       pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
-               break;
-       case I915_CACHE_LLC:
-               pte_flags |= GEN6_PTE_CACHE_LLC;
-               break;
-       case I915_CACHE_NONE:
-               if (IS_HASWELL(ppgtt->dev))
-                       pte_flags |= HSW_PTE_UNCACHED;
-               else
-                       pte_flags |= GEN6_PTE_UNCACHED;
-               break;
-       default:
-               BUG();
-       }
-
        i915_ppgtt_insert_sg_entries(ppgtt,
                                     obj->pages,
                                     obj->gtt_space->start >> PAGE_SHIFT,
-                                    pte_flags);
+                                    cache_level);
}
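
Illustration (not part of the patch): the index arithmetic in
i915_ppgtt_insert_sg_entries() above splits a linear PTE index into a
page-table slot (the act_pd computation in that hunk) and, presumably via the
matching modulo not shown in this excerpt, a first_pte offset within that
table. A standalone sketch, assuming 1024 entries per table (a 4K page table
of 4-byte gen6 PTEs); the driver's real constant is I915_PPGTT_PT_ENTRIES.

/* Standalone sketch of the PTE index split; PT_ENTRIES is an assumed value. */
#include <stdio.h>

#define PT_ENTRIES 1024u /* assumed stand-in for I915_PPGTT_PT_ENTRIES */

int main(void)
{
        unsigned first_entry = 300000;                 /* example linear PTE index */
        unsigned act_pd = first_entry / PT_ENTRIES;    /* which page table */
        unsigned first_pte = first_entry % PT_ENTRIES; /* slot within that table */

        printf("entry %u -> table %u, slot %u\n", first_entry, act_pd, first_pte);
        return 0;
}
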
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,