drm/radeon: split PT setup in more functions
author     Christian König <christian.koenig@amd.com>
           Wed, 30 Jul 2014 19:05:17 +0000 (21:05 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Tue, 5 Aug 2014 12:53:55 +0000 (08:53 -0400)
Move the decision of which method to use into the common VM code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/si_dma.c

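For orientation before the per-file hunks: the heart of the patch is the new
common dispatcher radeon_vm_set_pages() in radeon_vm.c, shown in full in its
hunk below. Condensed, with comments added here for explanation, the decision
it centralizes reads:

    static void radeon_vm_set_pages(struct radeon_device *rdev,
                                    struct radeon_ib *ib,
                                    uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint32_t flags)
    {
            trace_radeon_vm_set_page(pe, addr, count, incr, flags);

            if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
                    /* the PTEs already exist in the GART table: copy them */
                    uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
                    radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
            } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
                    /* system pages, or runs too short to amortize a PTE_PDE packet */
                    radeon_asic_vm_write_pages(rdev, ib, pe, addr,
                                               count, incr, flags);
            } else {
                    /* physically contiguous VRAM: let the DMA engine generate PTEs */
                    radeon_asic_vm_set_pages(rdev, ib, pe, addr,
                                             count, incr, flags);
            }
    }

Each ASIC then implements only the three primitives (copy_pages, write_pages,
set_pages) plus a pad_ib helper, instead of open-coding the whole decision
tree as the removed *_vm_set_page() functions did.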
drivers/gpu/drm/radeon/cik_sdma.c
index db8ce30ff31ff7cbe222211c54dac68fa089020b..bcf480510ac228af4bdf6e177eb0ccfb5951b24c 100644
@@ -749,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe, uint64_t src,
+                           unsigned count)
+{
+       while (count) {
+               unsigned bytes = count * 8;
+               if (bytes > 0x1FFFF8)
+                       bytes = 0x1FFFF8;
+
+               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+                       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+               ib->ptr[ib->length_dw++] = bytes;
+               ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+               ib->ptr[ib->length_dw++] = lower_32_bits(src);
+               ib->ptr[ib->length_dw++] = upper_32_bits(src);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+               pe += bytes;
+               src += bytes;
+               count -= bytes / 8;
+       }
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -759,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @incr: increase next addr by incr bytes
  * @flags: access flags
  *
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
  */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                         struct radeon_ib *ib,
-                         uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags)
 {
        uint64_t value;
        unsigned ndw;
 
-       trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-       if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-               uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-               while (count) {
-                       unsigned bytes = count * 8;
-                       if (bytes > 0x1FFFF8)
-                               bytes = 0x1FFFF8;
-
-                       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-                       ib->ptr[ib->length_dw++] = bytes;
-                       ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-                       ib->ptr[ib->length_dw++] = lower_32_bits(src);
-                       ib->ptr[ib->length_dw++] = upper_32_bits(src);
-                       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-                       pe += bytes;
-                       src += bytes;
-                       count -= bytes / 8;
-               }
-       } else if (flags & R600_PTE_SYSTEM) {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       /* for non-physically contiguous pages (system) */
-                       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-                       ib->ptr[ib->length_dw++] = pe;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-                       ib->ptr[ib->length_dw++] = ndw;
-                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               /* for non-physically contiguous pages (system) */
+               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+                       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+               ib->ptr[ib->length_dw++] = pe;
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = ndw;
+               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                       if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
-                               addr += incr;
-                               value |= flags;
-                               ib->ptr[ib->length_dw++] = value;
-                               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       }
-               }
-       } else {
-               while (count) {
-                       ndw = count;
-                       if (ndw > 0x7FFFF)
-                               ndw = 0x7FFFF;
-
-                       if (flags & R600_PTE_VALID)
+                       } else if (flags & R600_PTE_VALID) {
                                value = addr;
-                       else
+                       } else {
                                value = 0;
-                       /* for physically contiguous pages (vram) */
-                       ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-                       ib->ptr[ib->length_dw++] = pe; /* dst addr */
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-                       ib->ptr[ib->length_dw++] = flags; /* mask */
-                       ib->ptr[ib->length_dw++] = 0;
-                       ib->ptr[ib->length_dw++] = value; /* value */
+                       }
+                       addr += incr;
+                       value |= flags;
+                       ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       ib->ptr[ib->length_dw++] = incr; /* increment size */
-                       ib->ptr[ib->length_dw++] = 0;
-                       ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-                       pe += ndw * 8;
-                       addr += ndw * incr;
-                       count -= ndw;
                }
        }
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count;
+               if (ndw > 0x7FFFF)
+                       ndw = 0x7FFFF;
+
+               if (flags & R600_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+               pe += ndw * 8;
+               addr += ndw * incr;
+               count -= ndw;
+       }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
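The byte caps in the copy path are engine-specific packet limits: CIK's SDMA
copy packet moves at most 0x1FFFF8 bytes, SI's DMA at most 0xFFFF8 bytes, and
cayman's copy is capped at 0xFFFFE dwords. All of these are multiples of one
PTE (8 bytes), so each packet always retires a whole number of entries; at
0x1FFFF8 bytes, a single SDMA packet covers 0x1FFFF8 / 8 = 262,143 PTEs. The
three copy loops share the same chunking pattern, sketched here with
hypothetical MAX_BYTES and emit_copy_packet() placeholders (not driver
symbols):

    while (count) {
            unsigned bytes = count * 8;

            if (bytes > MAX_BYTES)  /* engine-specific packet limit */
                    bytes = MAX_BYTES;

            emit_copy_packet(ib, pe, src, bytes);

            pe    += bytes;         /* advance destination and source... */
            src   += bytes;
            count -= bytes / 8;     /* ...and retire whole 8-byte PTEs */
    }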
drivers/gpu/drm/radeon/ni_dma.c
index 66325ef396945c5d9d2bd5caa60349b80423b428..8a3e6221cece2c9eb09ca94a2d8b749dd6ea60e1 100644
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe, uint64_t src,
+                             unsigned count)
+{
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                                     0, 0, ndw);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = lower_32_bits(src);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+               pe += ndw * 4;
+               src += ndw * 4;
+               count -= ndw / 2;
+       }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -315,90 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: hw access flags 
+ * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                           struct radeon_ib *ib,
-                           uint64_t pe,
-                           uint64_t addr, unsigned count,
-                           uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe,
+                              uint64_t addr, unsigned count,
+                              uint32_t incr, uint32_t flags)
 {
        uint64_t value;
        unsigned ndw;
 
-       trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-       if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-               uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-                                                             0, 0, ndw);
-                       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-                       ib->ptr[ib->length_dw++] = lower_32_bits(src);
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-                       pe += ndw * 4;
-                       src += ndw * 4;
-                       count -= ndw / 2;
-               }
-
-       } else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       /* for non-physically contiguous pages (system) */
-                       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-                       ib->ptr[ib->length_dw++] = pe;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-                               if (flags & R600_PTE_SYSTEM) {
-                                       value = radeon_vm_map_gart(rdev, addr);
-                                       value &= 0xFFFFFFFFFFFFF000ULL;
-                               } else if (flags & R600_PTE_VALID) {
-                                       value = addr;
-                               } else {
-                                       value = 0;
-                               }
-                               addr += incr;
-                               value |= flags;
-                               ib->ptr[ib->length_dw++] = value;
-                               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       }
-               }
-       } else {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       if (flags & R600_PTE_VALID)
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               /* for non-physically contiguous pages (system) */
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+                                                     0, 0, ndw);
+               ib->ptr[ib->length_dw++] = pe;
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                       if (flags & R600_PTE_SYSTEM) {
+                               value = radeon_vm_map_gart(rdev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                       } else if (flags & R600_PTE_VALID) {
                                value = addr;
-                       else
+                       } else {
                                value = 0;
-                       /* for physically contiguous pages (vram) */
-                       ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-                       ib->ptr[ib->length_dw++] = pe; /* dst addr */
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       ib->ptr[ib->length_dw++] = flags; /* mask */
-                       ib->ptr[ib->length_dw++] = 0;
-                       ib->ptr[ib->length_dw++] = value; /* value */
+                       }
+                       addr += incr;
+                       value |= flags;
+                       ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       ib->ptr[ib->length_dw++] = incr; /* increment size */
-                       ib->ptr[ib->length_dw++] = 0;
-                       pe += ndw * 4;
-                       addr += (ndw / 2) * incr;
-                       count -= ndw / 2;
                }
        }
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               if (flags & R600_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+
+               pe += ndw * 4;
+               addr += (ndw / 2) * incr;
+               count -= ndw / 2;
+       }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
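Padding also moves out of the per-chunk code: the rings require an IB length
that is a multiple of 8 dwords, and the common code now pads exactly once,
after all PTE updates have been emitted (see the radeon_vm.c hunks below).
Taken from the radeon_vm_clear_bo() hunk, the submission pattern becomes:

    ib.length_dw = 0;
    radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
    radeon_asic_vm_pad_ib(rdev, &ib);   /* NOP-pad to an 8-dword boundary */
    r = radeon_ib_schedule(rdev, &ib, NULL);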
drivers/gpu/drm/radeon/radeon.h
index 56fc7d2da1494d939dbba66a156cc6e4da19dc46..142cad63c3fad28e27872fee55dad7989bdecbce 100644
@@ -1797,11 +1797,21 @@ struct radeon_asic {
        struct {
                int (*init)(struct radeon_device *rdev);
                void (*fini)(struct radeon_device *rdev);
-               void (*set_page)(struct radeon_device *rdev,
-                                struct radeon_ib *ib,
-                                uint64_t pe,
-                                uint64_t addr, unsigned count,
-                                uint32_t incr, uint32_t flags);
+               void (*copy_pages)(struct radeon_device *rdev,
+                                  struct radeon_ib *ib,
+                                  uint64_t pe, uint64_t src,
+                                  unsigned count);
+               void (*write_pages)(struct radeon_device *rdev,
+                                   struct radeon_ib *ib,
+                                   uint64_t pe,
+                                   uint64_t addr, unsigned count,
+                                   uint32_t incr, uint32_t flags);
+               void (*set_pages)(struct radeon_device *rdev,
+                                 struct radeon_ib *ib,
+                                 uint64_t pe,
+                                 uint64_t addr, unsigned count,
+                                 uint32_t incr, uint32_t flags);
+               void (*pad_ib)(struct radeon_ib *ib);
        } vm;
        /* ring specific callbacks */
        struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2761,7 +2771,10 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
drivers/gpu/drm/radeon/radeon_asic.c
index c49a01f92b4de94dc02901715294145637d0cb8f..eeeeabe09758ddccf2175ad48d9b9bd62e896e83 100644
@@ -1613,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .set_page = &cayman_dma_vm_set_page,
+               .copy_pages = &cayman_dma_vm_copy_pages,
+               .write_pages = &cayman_dma_vm_write_pages,
+               .set_pages = &cayman_dma_vm_set_pages,
+               .pad_ib = &cayman_dma_vm_pad_ib,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1713,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .set_page = &cayman_dma_vm_set_page,
+               .copy_pages = &cayman_dma_vm_copy_pages,
+               .write_pages = &cayman_dma_vm_write_pages,
+               .set_pages = &cayman_dma_vm_set_pages,
+               .pad_ib = &cayman_dma_vm_pad_ib,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1843,7 +1849,10 @@ static struct radeon_asic si_asic = {
        .vm = {
                .init = &si_vm_init,
                .fini = &si_vm_fini,
-               .set_page = &si_dma_vm_set_page,
+               .copy_pages = &si_dma_vm_copy_pages,
+               .write_pages = &si_dma_vm_write_pages,
+               .set_pages = &si_dma_vm_set_pages,
+               .pad_ib = &cayman_dma_vm_pad_ib,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -2001,7 +2010,10 @@ static struct radeon_asic ci_asic = {
        .vm = {
                .init = &cik_vm_init,
                .fini = &cik_vm_fini,
-               .set_page = &cik_sdma_vm_set_page,
+               .copy_pages = &cik_sdma_vm_copy_pages,
+               .write_pages = &cik_sdma_vm_write_pages,
+               .set_pages = &cik_sdma_vm_set_pages,
+               .pad_ib = &cik_sdma_vm_pad_ib,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2105,7 +2117,10 @@ static struct radeon_asic kv_asic = {
        .vm = {
                .init = &cik_vm_init,
                .fini = &cik_vm_fini,
-               .set_page = &cik_sdma_vm_set_page,
+               .copy_pages = &cik_sdma_vm_copy_pages,
+               .write_pages = &cik_sdma_vm_write_pages,
+               .set_pages = &cik_sdma_vm_set_pages,
+               .pad_ib = &cik_sdma_vm_pad_ib,
        },
        .ring = {
                [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
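Note the SI entry above: si_asic gets its own copy/write/set callbacks but
reuses cayman_dma_vm_pad_ib for padding. The two DMA engines evidently emit a
compatible NOP packet, since the patch wires SI to cayman's helper and simply
deletes the inline pad loop in si_dma.c (see its hunk below); no SI-specific
pad helper is needed.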
drivers/gpu/drm/radeon/radeon_asic.h
index 3cf6be6666fc52d3c9c269181b3ed0715def76b2..275a5dc01780f7bedbed2cb3486c4c3d383710eb 100644
@@ -607,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                           struct radeon_ib *ib,
-                           uint64_t pe,
-                           uint64_t addr, unsigned count,
-                           uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe, uint64_t src,
+                             unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe,
+                              uint64_t addr, unsigned count,
+                              uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -694,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
                uint64_t src_offset, uint64_t dst_offset,
                unsigned num_gpu_pages,
                struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
-                       struct radeon_ib *ib,
-                       uint64_t pe,
-                       uint64_t addr, unsigned count,
-                       uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+                         struct radeon_ib *ib,
+                         uint64_t pe, uint64_t src,
+                         unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+                        struct radeon_ib *ib,
+                        uint64_t pe,
+                        uint64_t addr, unsigned count,
+                        uint32_t incr, uint32_t flags);
+
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -772,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                         struct radeon_ib *ib,
-                         uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe, uint64_t src,
+                           unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
drivers/gpu/drm/radeon/radeon_vm.c
index d520ab71b74857d61c4b4520a00594266f7978df..e9758816203007576fec110dd924f347b007cfbe 100644
@@ -340,6 +340,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        return bo_va;
 }
 
+/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+                               struct radeon_ib *ib,
+                               uint64_t pe,
+                               uint64_t addr, unsigned count,
+                               uint32_t incr, uint32_t flags)
+{
+       trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+       if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+               uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+               radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+       } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+               radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+                                          count, incr, flags);
+
+       } else {
+               radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+                                        count, incr, flags);
+       }
+}
+
 /**
  * radeon_vm_clear_bo - initially clear the page dir/table
  *
@@ -381,7 +417,8 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
        ib.length_dw = 0;
 
-       radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+       radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+       radeon_asic_vm_pad_ib(rdev, &ib);
 
        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r)
@@ -634,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                    ((last_pt + incr * count) != pt)) {
 
                        if (count) {
-                               radeon_asic_vm_set_page(rdev, &ib, last_pde,
-                                                       last_pt, count, incr,
-                                                       R600_PTE_VALID);
+                               radeon_vm_set_pages(rdev, &ib, last_pde,
+                                                   last_pt, count, incr,
+                                                   R600_PTE_VALID);
                        }
 
                        count = 1;
@@ -648,10 +685,11 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
        }
 
        if (count)
-               radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
-                                       incr, R600_PTE_VALID);
+               radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+                                   incr, R600_PTE_VALID);
 
        if (ib.length_dw != 0) {
+               radeon_asic_vm_pad_ib(rdev, &ib);
                radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -719,30 +757,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
            (frag_start >= frag_end)) {
 
                count = (pe_end - pe_start) / 8;
-               radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-                                       RADEON_GPU_PAGE_SIZE, flags);
+               radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+                                   RADEON_GPU_PAGE_SIZE, flags);
                return;
        }
 
        /* handle the 4K area at the beginning */
        if (pe_start != frag_start) {
                count = (frag_start - pe_start) / 8;
-               radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
-                                       RADEON_GPU_PAGE_SIZE, flags);
+               radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+                                   RADEON_GPU_PAGE_SIZE, flags);
                addr += RADEON_GPU_PAGE_SIZE * count;
        }
 
        /* handle the area in the middle */
        count = (frag_end - frag_start) / 8;
-       radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
-                               RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+       radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+                           RADEON_GPU_PAGE_SIZE, flags | frag_flags);
 
        /* handle the 4K area at the end */
        if (frag_end != pe_end) {
                addr += RADEON_GPU_PAGE_SIZE * count;
                count = (pe_end - frag_end) / 8;
-               radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
-                                       RADEON_GPU_PAGE_SIZE, flags);
+               radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+                                   RADEON_GPU_PAGE_SIZE, flags);
        }
 }
 
@@ -900,6 +938,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
                              bo_va->it.last + 1, addr,
                              radeon_vm_page_flags(bo_va->flags));
 
+       radeon_asic_vm_pad_ib(rdev, &ib);
        radeon_semaphore_sync_to(ib.semaphore, vm->fence);
        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
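The count < 3 cutoff in radeon_vm_set_pages() matches the packet economics of
the two write paths. On cayman/SI a WRITE packet costs 3 header dwords plus 2
dwords per PTE, while a PTE_PDE packet is a fixed 9 dwords:

    count == 1:  write = 3 + 2*1 = 5 dw    pte_pde = 9 dw
    count == 2:  write = 3 + 2*2 = 7 dw    pte_pde = 9 dw
    count == 3:  write = 3 + 2*3 = 9 dw    pte_pde = 9 dw   (break even)

On CIK the WRITE header is 4 dwords and GENERATE_PTE_PDE is 10, giving the
same break-even point. The old cayman code special-cased only count == 1; the
common code widens this to count < 3, so writing one or two PTEs directly is
never larger than generating them.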
drivers/gpu/drm/radeon/si_dma.c
index a26e842cf39136c6c5ba01ded73538aa5bffc01d..7165051294504470c24e054b384802f05dc24de7 100644
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+                         struct radeon_ib *ib,
+                         uint64_t pe, uint64_t src,
+                         unsigned count)
+{
+       while (count) {
+               unsigned bytes = count * 8;
+               if (bytes > 0xFFFF8)
+                       bytes = 0xFFFF8;
+
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                                     1, 0, 0, bytes);
+               ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+               ib->ptr[ib->length_dw++] = lower_32_bits(src);
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+               pe += bytes;
+               src += bytes;
+               count -= bytes / 8;
+       }
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @incr: increase next addr by incr bytes
  * @flags: access flags
  *
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
  */
-void si_dma_vm_set_page(struct radeon_device *rdev,
-                       struct radeon_ib *ib,
-                       uint64_t pe,
-                       uint64_t addr, unsigned count,
-                       uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags)
 {
        uint64_t value;
        unsigned ndw;
 
-       trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-       if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-               uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-               while (count) {
-                       unsigned bytes = count * 8;
-                       if (bytes > 0xFFFF8)
-                               bytes = 0xFFFF8;
-
-                       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
-                                                             1, 0, 0, bytes);
-                       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-                       ib->ptr[ib->length_dw++] = lower_32_bits(src);
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
-                       pe += bytes;
-                       src += bytes;
-                       count -= bytes / 8;
-               }
-       } else if (flags & R600_PTE_SYSTEM) {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       /* for non-physically contiguous pages (system) */
-                       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
-                       ib->ptr[ib->length_dw++] = pe;
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               /* for non-physically contiguous pages (system) */
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+               ib->ptr[ib->length_dw++] = pe;
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                       if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
-                               addr += incr;
-                               value |= flags;
-                               ib->ptr[ib->length_dw++] = value;
-                               ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       }
-               }
-       } else {
-               while (count) {
-                       ndw = count * 2;
-                       if (ndw > 0xFFFFE)
-                               ndw = 0xFFFFE;
-
-                       if (flags & R600_PTE_VALID)
+                       } else if (flags & R600_PTE_VALID) {
                                value = addr;
-                       else
+                       } else {
                                value = 0;
-                       /* for physically contiguous pages (vram) */
-                       ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-                       ib->ptr[ib->length_dw++] = pe; /* dst addr */
-                       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-                       ib->ptr[ib->length_dw++] = flags; /* mask */
-                       ib->ptr[ib->length_dw++] = 0;
-                       ib->ptr[ib->length_dw++] = value; /* value */
+                       }
+                       addr += incr;
+                       value |= flags;
+                       ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
-                       ib->ptr[ib->length_dw++] = incr; /* increment size */
-                       ib->ptr[ib->length_dw++] = 0;
-                       pe += ndw * 4;
-                       addr += (ndw / 2) * incr;
-                       count -= ndw / 2;
                }
        }
-       while (ib->length_dw & 0x7)
-               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+                        struct radeon_ib *ib,
+                        uint64_t pe,
+                        uint64_t addr, unsigned count,
+                        uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               if (flags & R600_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               pe += ndw * 4;
+               addr += (ndw / 2) * incr;
+               count -= ndw / 2;
+       }
 }
 
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
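
One subtlety of the split: the old CIK and SI code only reached the WRITE path
when R600_PTE_SYSTEM was set, so the inner loop could assume a GART lookup.
The new *_vm_write_pages() functions can also be called for short non-system
runs (count < 3), which is why the per-entry value selection (SYSTEM lookup
vs. VALID address vs. zero) now lives inside the loop on all three engines,
mirroring what the cayman code already did.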