Merge branch 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux into drm...
author    Dave Airlie <airlied@redhat.com>
          Thu, 15 Oct 2015 23:39:14 +0000 (09:39 +1000)
committer Dave Airlie <airlied@redhat.com>
          Thu, 15 Oct 2015 23:39:14 +0000 (09:39 +1000)
This is the first radeon and amdgpu pull for drm-next. Highlights include:
- Efficiency improvements to the CS checker for pre-SI asics
- Cursor fixes ported from radeon to amdgpu
- Enable GPU scheduler by default
- Add a bunch of GPUVM debugging options
- Add support for some new atombios opcodes
- Misc cleanups and fixes

* 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux: (42 commits)
  drm/amdgpu: fix lockup when clean pending fences
  drm/amdgpu: add timer to fence to detect scheduler lockup
  drm/amdgpu: add VM CS mapping trace point
  drm/amdgpu: add option to clear VM page tables after every submit
  drm/amdgpu: add option to stop on VM fault
  drm/amdgpu: only print meaningful VM faults
  drm/amdgpu: also trace already allocated VMIDs
  drm/amdgpu: Drop unnecessary #include <linux/vga_switcheroo.h>
  drm/radeon: Drop unnecessary #include <linux/vga_switcheroo.h>
  drm/amdgpu: clean up pageflip interrupt handling
  drm/amdgpu: rework sdma structures
  drm/amdgpu: unpin cursor BOs on suspend and pin them again on resume
  drm/amdgpu/dce8: Fold set_cursor() into show_cursor()
  drm/amdgpu/dce8: Clean up reference counting and pinning of the cursor BOs
  drm/amdgpu/dce8: Move hotspot handling out of set_cursor
  drm/amdgpu/dce8: Re-show the cursor after a modeset (v2)
  drm/amdgpu/dce8: Use cursor_set2 hook for enabling / disabling the HW cursor
  drm/amdgpu/dce11: Fold set_cursor() into show_cursor()
  drm/amdgpu/dce11: Clean up reference counting and pinning of the cursor BOs
  drm/amdgpu/dce11: Move hotspot handling out of set_cursor
  ...

37 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/atom.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_kms.c

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6647fb26ef25ce21dba9bffb87cb31e5abe73d80..736eb4560fd588a3d4a80b77f463b2fa8ef0f0e0 100644
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
@@ -960,6 +962,11 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_FRAG_64KB   (4 << 7)
 #define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER     0
+#define AMDGPU_VM_FAULT_STOP_FIRST     1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS    2
+
 struct amdgpu_vm_pt {
        struct amdgpu_bo                *bo;
        uint64_t                        addr;
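
For context, a minimal sketch (not part of this patch) of how a memory-controller fault path could consume the new AMDGPU_VM_FAULT_STOP_* levels; handle_vm_fault() and stop_gpu_on_fault() are hypothetical stand-ins, only the three constants and the amdgpu_vm_fault_stop parameter come from this diff:

    /* Hypothetical sketch: honor the amdgpu_vm_fault_stop module parameter.
     * stop_gpu_on_fault() stands in for the asic-specific halt routine. */
    static void handle_vm_fault(struct amdgpu_device *adev, bool first_fault)
    {
            switch (amdgpu_vm_fault_stop) {
            case AMDGPU_VM_FAULT_STOP_FIRST:
                    if (first_fault)
                            stop_gpu_on_fault(adev); /* halt once, keep state inspectable */
                    break;
            case AMDGPU_VM_FAULT_STOP_ALWAYS:
                    stop_gpu_on_fault(adev);         /* halt on every fault */
                    break;
            case AMDGPU_VM_FAULT_STOP_NEVER:
            default:
                    break;                           /* just log and keep going */
            }
    }
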
@@ -1708,7 +1715,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
        /* SDMA firmware */
        const struct firmware   *fw;
        uint32_t                fw_version;
@@ -1718,6 +1725,13 @@ struct amdgpu_sdma {
        bool                    burst_nop;
 };
 
+struct amdgpu_sdma {
+       struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+       struct amdgpu_irq_src   trap_irq;
+       struct amdgpu_irq_src   illegal_inst_irq;
+       int                     num_instances;
+};
+
 /*
  * Firmware
  */
@@ -2064,9 +2078,7 @@ struct amdgpu_device {
        struct amdgpu_gfx               gfx;
 
        /* sdma */
-       struct amdgpu_sdma              sdma[AMDGPU_MAX_SDMA_INSTANCES];
-       struct amdgpu_irq_src           sdma_trap_irq;
-       struct amdgpu_irq_src           sdma_illegal_inst_irq;
+       struct amdgpu_sdma              sdma;
 
        /* uvd */
        bool                            has_uvd;
@@ -2203,17 +2215,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
        ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        int i;
 
-       for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-               if (&adev->sdma[i].ring == ring)
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               if (&adev->sdma.instance[i].ring == ring)
                        break;
 
        if (i < AMDGPU_MAX_SDMA_INSTANCES)
-               return &adev->sdma[i];
+               return &adev->sdma.instance[i];
        else
                return NULL;
 }
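
The reworked helper now returns the per-instance struct rather than the old container; a minimal illustrative use (not from the patch) that queries per-instance state through it:

    /* Sketch: map a ring back to its owning SDMA instance and read a
     * per-instance field such as burst_nop (defined in the hunk above). */
    static bool ring_supports_burst_nop(struct amdgpu_ring *ring)
    {
            struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);

            return sdma && sdma->burst_nop;
    }
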
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index aef4a7aac0f705325b9804777021a7c7626e342f..a142d5ae148d91eaa89b8a92c95836ba0d0b45f3 100644
@@ -25,7 +25,6 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index dd2037bc0b4afa265435cdc3c27b4ddc2af0071b..0e1376317683e4de30803a987450dba3e01b9621 100644
@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[0].fw->data;
+                                                       adev->sdma.instance[0].fw->data;
                break;
 
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[1].fw->data;
+                                                       adev->sdma.instance[1].fw->data;
                break;
 
        default:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index dfd1d503bccfe6137f94c92b7a39478062a4878b..79fa5c7de856eab635ea9010ab0b8bfc446c37c2 100644
@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[0].fw->data;
+                                                       adev->sdma.instance[0].fw->data;
                break;
 
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[1].fw->data;
+                                                       adev->sdma.instance[1].fw->data;
                break;
 
        default:
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3f7aaa45bf8e7ae0ee45faafa463e5425ffae463..1a6b239baab920d63e687954dbab2aa86c420422 100644
@@ -536,7 +536,7 @@ static bool amdgpu_atpx_detect(void)
 
        if (has_atpx && vga_count == 2) {
                acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+               printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                amdgpu_atpx_priv.atpx_detected = true;
                return true;
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 02add0a508cba513bbe18b2a9ca99c1dd79352a8..c44c0c6afd1b15e3d6a3c32bfd62da7129976bfa 100644
@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "atom.h"
 
-#include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 /*
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 749420f1ea6fbf2bc1417cfd5ea0210cf3c6243d..25012c790f8fd6578f1b65183b61e58d0362cef8 100644
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        case AMDGPU_HW_IP_DMA:
-               if (ring < 2) {
-                       *out_ring = &adev->sdma[ring].ring;
+               if (ring < adev->sdma.num_instances) {
+                       *out_ring = &adev->sdma.instance[ring].ring;
                } else {
-                       DRM_ERROR("only two SDMA rings are supported\n");
+                       DRM_ERROR("only %d SDMA rings are supported\n",
+                                 adev->sdma.num_instances);
                        return -EINVAL;
                }
                break;
@@ -566,9 +567,24 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (r)
                                return r;
                }
+
+       }
+
+       r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+
+       if (amdgpu_vm_debug && p->bo_list) {
+               /* Invalidate all BOs to test for userspace bugs */
+               for (i = 0; i < p->bo_list->num_entries; i++) {
+                       /* ignore duplicates */
+                       bo = p->bo_list->array[i].robj;
+                       if (!bo)
+                               continue;
+
+                       amdgpu_vm_bo_invalidate(adev, bo);
+               }
        }
 
-       return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+       return r;
 }
 
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6068d8207d108413bdbe3b33035ca85b685e1ec3..901a460b2c5573bfe4ae1af93c16d8be95140420 100644
@@ -1022,7 +1022,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
  * amdgpu_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
  *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
@@ -1657,11 +1657,21 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        }
        drm_modeset_unlock_all(dev);
 
-       /* unpin the front buffers */
+       /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
                struct amdgpu_bo *robj;
 
+               if (amdgpu_crtc->cursor_bo) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, false);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+
                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
@@ -1713,6 +1723,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct amdgpu_device *adev = dev->dev_private;
+       struct drm_crtc *crtc;
        int r;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1746,6 +1757,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                return r;
 
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+               if (amdgpu_crtc->cursor_bo) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, false);
+                       if (r == 0) {
+                               r = amdgpu_bo_pin(aobj,
+                                                 AMDGPU_GEM_DOMAIN_VRAM,
+                                                 &amdgpu_crtc->cursor_addr);
+                               if (r != 0)
+                                       DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+       }
+
        /* blat the mode back in */
        if (fbcon) {
                drm_helper_resume_force_mode(dev);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index adb48353f2e1a10f169df7c2cd4fc6d6f8e2c23a..6134b1ece17fa47d1c409a4d9ead4812d8419720 100644
@@ -75,11 +75,13 @@ int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
+int amdgpu_vm_fault_stop = 0;
+int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 0;
+int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 1;
+int amdgpu_enable_semaphores = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -141,10 +143,16 @@ module_param_named(vm_size, amdgpu_vm_size, int, 0444);
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
 
+MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
+module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+
+MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
+module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))");
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
 module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
 
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
@@ -153,7 +161,7 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
 module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
 
 static struct pci_device_id pciidlist[] = {
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index b3fc26c59787f37acf3daff3d17917abf085e3be..fcad7e060938853c5f274a9fd2ddc3c438a1ab14 100644
@@ -628,8 +628,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
        init_waitqueue_head(&ring->fence_drv.fence_queue);
 
        if (amdgpu_enable_scheduler) {
+               long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+               if (timeout == 0) {
+                       /*
+                        * FIXME:
+                        * Delayed workqueue cannot use it directly,
+                        * so the scheduler will not use delayed workqueue if
+                        * MAX_SCHEDULE_TIMEOUT is set.
+                        * Currently keep it simple and silly.
+                        */
+                       timeout = MAX_SCHEDULE_TIMEOUT;
+               }
                r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-                                  amdgpu_sched_hw_submission, ring->name);
+                                  amdgpu_sched_hw_submission,
+                                  timeout, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
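
In isolation, the conversion this hunk adds: a lockup timeout of zero means "no watchdog", which is expressed to the scheduler as MAX_SCHEDULE_TIMEOUT rather than a zero-length delayed work. A minimal sketch using the kernel's msecs_to_jiffies()/MAX_SCHEDULE_TIMEOUT (the wrapper function itself is illustrative, not from the patch):

    /* Sketch: derive the scheduler watchdog timeout from the
     * lockup_timeout module parameter (0 = disabled -> infinite). */
    static long sched_timeout(int lockup_timeout_ms)
    {
            long timeout = msecs_to_jiffies(lockup_timeout_ms);

            return timeout ? timeout : MAX_SCHEDULE_TIMEOUT;
    }
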
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 275f1c3dbba03c322b0697d20695cb6e9667caba..3f5f2d58ad9464cb3ebaab1a13d921752bced894 100644
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
-                       ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-                       ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+                       for (i = 0; i < adev->sdma.num_instances; i++)
+                               ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
                        ib_size_alignment = 1;
                        break;
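
The loop above builds a per-instance ready bitmask (bit i set when SDMA instance i is ready); a standalone illustration of the same logic with made-up values:

    #include <stdio.h>

    int main(void)
    {
            int ready[2] = { 1, 0 };  /* instance 0 ready, instance 1 not */
            unsigned ring_mask = 0;
            int i;

            for (i = 0; i < 2; i++)
                    ring_mask |= (ready[i] ? 1 : 0) << i;
            printf("ring_mask = 0x%x\n", ring_mask); /* prints 0x1 */
            return 0;
    }
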
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_SDMA:
-                       if (info->query_fw.index >= 2)
+                       if (info->query_fw.index >= adev->sdma.num_instances)
                                return -EINVAL;
-                       fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-                       fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+                       fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+                       fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
                        break;
                default:
                        return -EINVAL;
@@ -489,7 +489,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  *
  * @dev: drm dev pointer
  *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2b03425f97406487337306a50f76dbbc538c51c1..d8dea52221e9840abb1fa698573c858458edce32 100644
@@ -373,6 +373,10 @@ struct amdgpu_crtc {
        uint32_t crtc_offset;
        struct drm_gem_object *cursor_bo;
        uint64_t cursor_addr;
+       int cursor_x;
+       int cursor_y;
+       int cursor_hot_x;
+       int cursor_hot_y;
        int cursor_width;
        int cursor_height;
        int max_cursor_width;
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1a7708f365f37923e747dda4b37e5bd3f0ceb203..0d524384ff79c3b49d2dbaa1489d8cbff231d80a 100644
@@ -132,6 +132,8 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
+               if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
+                       placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
        }
 
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 30dce235ddeb4e4f3660338bf5a14d6afa3864c5..b13a74b273a690959dc79b9944a73c3a83fdf9a2 100644
@@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 961d7265c286524956e1100b14f084eb6e5341b0..76ecbaf72a2e81bf70d12f63c1ca9c7517fdf3e6 100644
@@ -111,7 +111,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
                      __entry->offset, __entry->flags)
 );
 
-TRACE_EVENT(amdgpu_vm_bo_update,
+DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
            TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
            TP_ARGS(mapping),
            TP_STRUCT__entry(
@@ -129,6 +129,16 @@ TRACE_EVENT(amdgpu_vm_bo_update,
                      __entry->soffset, __entry->eoffset, __entry->flags)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_page,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint32_t flags),
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 364cbe97533298d45e9087d322c35f7c3923b52a..a089e69e9927610666ff9fb26c2be008335a473a 100644
@@ -1072,6 +1072,11 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
+       if (ttm_pl == TTM_PL_VRAM)
+               seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+                          adev->mman.bdev.man[ttm_pl].size,
+                          atomic64_read(&adev->vram_usage) >> 20,
+                          atomic64_read(&adev->vram_vis_usage) >> 20);
        return ret;
 }
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1e14531353e05ec7aadd69ea9d6e019310a25682..0675524eb9de57a5a911dbca2ffcd2b66d39fd32 100644
@@ -147,8 +147,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
        /* check if the id is still valid */
        if (vm_id->id && vm_id->last_id_use &&
-           vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
+           vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
+               trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
                return 0;
+       }
 
        /* we definitely need to flush */
        vm_id->pd_gpu_addr = ~0ll;
@@ -850,6 +852,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        return r;
        }
 
+       if (trace_amdgpu_vm_bo_mapping_enabled()) {
+               list_for_each_entry(mapping, &bo_va->valids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+
+               list_for_each_entry(mapping, &bo_va->invalids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+       }
+
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
drivers/gpu/drm/amd/amdgpu/atom.c
index a0346a90d805062eb62dad9812f075b7a585c6ce..1b50e6c13fb3fc2b8136581088ae07e40e8739c3 100644
@@ -685,6 +685,27 @@ static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
        }
 }
 
+static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint64_t val64;
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       if (src != 0) {
+               val64 = dst;
+               val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
+               do_div(val64, src);
+               ctx->ctx->divmul[0] = lower_32_bits(val64);
+               ctx->ctx->divmul[1] = upper_32_bits(val64);
+       } else {
+               ctx->ctx->divmul[0] = 0;
+               ctx->ctx->divmul[1] = 0;
+       }
+}
+
 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 {
        /* functionally, a nop */
@@ -788,6 +809,20 @@ static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
        ctx->ctx->divmul[0] = dst * src;
 }
 
+static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint64_t val64;
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       val64 = (uint64_t)dst * (uint64_t)src;
+       ctx->ctx->divmul[0] = lower_32_bits(val64);
+       ctx->ctx->divmul[1] = upper_32_bits(val64);
+}
+
 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 {
        /* nothing */
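
The new MUL32/DIV32 opcodes produce 64-bit results split across divmul[0] (low word) and divmul[1] (high word); a quick userspace check of that split, with arbitrary operands:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t dst = 0x80000000u, src = 16;           /* arbitrary operands */
            uint64_t val64 = (uint64_t)dst * (uint64_t)src; /* the MUL32 path */
            uint32_t lo = (uint32_t)val64;                  /* lower_32_bits() */
            uint32_t hi = (uint32_t)(val64 >> 32);          /* upper_32_bits() */

            printf("divmul[0]=0x%08x divmul[1]=0x%08x\n", lo, hi);
            /* prints divmul[0]=0x00000000 divmul[1]=0x00000008 */
            return 0;
    }
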
@@ -1022,7 +1057,15 @@ static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
 
 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
 {
-       printk(KERN_INFO "unimplemented!\n");
+       uint8_t val = U8((*ptr)++);
+       SDEBUG("DEBUG output: 0x%02X\n", val);
+}
+
+static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint16_t val = U16(*ptr);
+       (*ptr) += val + 2;
+       SDEBUG("PROCESSDS output: 0x%02X\n", val);
 }
 
 static struct {
@@ -1151,7 +1194,13 @@ static struct {
        atom_op_shr, ATOM_ARG_FB}, {
        atom_op_shr, ATOM_ARG_PLL}, {
        atom_op_shr, ATOM_ARG_MC}, {
-atom_op_debug, 0},};
+       atom_op_debug, 0}, {
+       atom_op_processds, 0}, {
+       atom_op_mul32, ATOM_ARG_PS}, {
+       atom_op_mul32, ATOM_ARG_WS}, {
+       atom_op_div32, ATOM_ARG_PS}, {
+       atom_op_div32, ATOM_ARG_WS},
+};
 
 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
drivers/gpu/drm/amd/amdgpu/atom.h
index 09d0f8230708a4ff08947600d6913f9a5f9c5b0d..fece8f45dc7a3308c81f22e29fbd8b2a5e1bff70 100644
@@ -60,7 +60,7 @@
 #define ATOM_CT_PS_MASK                0x7F
 #define ATOM_CT_CODE_PTR       6
 
-#define ATOM_OP_CNT            123
+#define ATOM_OP_CNT            127
 #define ATOM_OP_EOT            91
 
 #define ATOM_CASE_MAGIC                0x63
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9ea9de457da373f702b401633ca4f7113748cbf6..814598e76c98d5b44432517ae0e5f19c0e1bb37d 100644
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
 
        DRM_DEBUG("\n");
 
@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
        }
 out:
        if (err) {
                printk(KERN_ERR
                       "cik_sdma: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 }
@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
        else
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl;
        int i;
 
@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
                cik_sdma_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
        u32 wb_offset;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
        u32 fw_size;
        int i, j;
 
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
-
        /* halt the MEs */
        cik_sdma_enable(adev, false);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
                fw_data = (const __le32 *)
-                       (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
@@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
        cik_sdma_set_ring_funcs(adev);
        cik_sdma_set_irq_funcs(adev);
        cik_sdma_set_buffer_funcs(adev);
@@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int r;
+       int r, i;
 
        r = cik_sdma_init_microcode(adev);
        if (r) {
@@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
        }
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
 static int cik_sdma_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
        dev_info(adev->dev, "CIK SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
-       adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
 
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e4d101b1252a47eaf7a2c7e35c2d8d83f737d762..37073930e2c9f92f0024059b998231bf2ad7272c 100644
@@ -2499,26 +2499,19 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              lower_32_bits(gpu_addr));
-}
-
-static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
+                                       int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2538,26 +2531,40 @@ static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v10_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v10_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                     int x, int y)
+{
+       int ret;
+
+       dce_v10_0_lock_cursor(crtc, true);
+       ret = dce_v10_0_cursor_move_locked(crtc, x, y);
+       dce_v10_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                     struct drm_file *file_priv,
+                                     uint32_t handle,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     int32_t hot_x,
+                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2579,41 +2586,71 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v10_0_lock_cursor(crtc, true);
-       dce_v10_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v10_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v10_0_show_cursor(crtc);
        dce_v10_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v10_0_lock_cursor(crtc, true);
+
+               dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                            amdgpu_crtc->cursor_y);
+
+               dce_v10_0_show_cursor(crtc);
+
+               dce_v10_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
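
The hotspot handling in dce_v10_0_crtc_cursor_set2() shifts the stored position by (old hotspot - new hotspot) so the on-screen image does not jump when userspace supplies a cursor with a different hotspot. A tiny standalone check of that arithmetic (values arbitrary):

    #include <stdio.h>

    int main(void)
    {
            int cursor_x = 100, cursor_y = 80; /* last programmed position */
            int hot_old_x = 4, hot_old_y = 4;  /* hotspot of the previous image */
            int hot_new_x = 12, hot_new_y = 2; /* hotspot of the new image */

            /* same adjustment as the hunk above */
            int x = cursor_x + hot_old_x - hot_new_x;
            int y = cursor_y + hot_old_y - hot_new_y;

            printf("repositioned to %d,%d\n", x, y); /* prints 92,82 */
            return 0;
    }
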
@@ -2641,7 +2678,7 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
-       .cursor_set = dce_v10_0_crtc_cursor_set,
+       .cursor_set2 = dce_v10_0_crtc_cursor_set2,
        .cursor_move = dce_v10_0_crtc_cursor_move,
        .gamma_set = dce_v10_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2774,6 +2811,7 @@ static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v10_0_cursor_reset(crtc);
        /* update the hw version for dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3267,37 +3305,20 @@ static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
@@ -3306,7 +3327,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3315,33 +3335,15 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if (amdgpu_crtc == NULL)
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 6411e824467164831eef8af634051f95b8faba69..c1147ecff1eeba1b22fee872abd7fe750c963151 100644
@@ -2476,26 +2476,19 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              lower_32_bits(gpu_addr));
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
+                                       int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2515,26 +2508,40 @@ static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v11_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v11_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                     int x, int y)
+{
+       int ret;
+
+       dce_v11_0_lock_cursor(crtc, true);
+       ret = dce_v11_0_cursor_move_locked(crtc, x, y);
+       dce_v11_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                     struct drm_file *file_priv,
+                                     uint32_t handle,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     int32_t hot_x,
+                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2556,41 +2563,71 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v11_0_lock_cursor(crtc, true);
-       dce_v11_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v11_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v11_0_show_cursor(crtc);
        dce_v11_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v11_0_lock_cursor(crtc, true);
+
+               dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                            amdgpu_crtc->cursor_y);
+
+               dce_v11_0_show_cursor(crtc);
+
+               dce_v11_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2618,7 +2655,7 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
-       .cursor_set = dce_v11_0_crtc_cursor_set,
+       .cursor_set2 = dce_v11_0_crtc_cursor_set2,
        .cursor_move = dce_v11_0_crtc_cursor_move,
        .gamma_set = dce_v11_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2751,6 +2788,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v11_0_cursor_reset(crtc);
        /* update the hw version for dpm */

        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3243,37 +3281,20 @@ static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
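
The old code mapped each pageflip IRQ source to its CRTC register block with a six-way switch; the rework indexes a crtc_offsets[] table and rejects anything at or beyond adev->mode_info.num_crtc, which also copes with ASICs exposing fewer than six CRTCs. A sketch of the table-lookup idiom, with invented offsets:

    #include <stdio.h>

    /* Hypothetical per-CRTC register block offsets. */
    static const unsigned crtc_offsets[] = { 0x0000, 0x0200, 0x0400 };
    #define NUM_CRTC (sizeof(crtc_offsets) / sizeof(crtc_offsets[0]))

    static int reg_for_crtc(unsigned type, unsigned base, unsigned *reg)
    {
            if (type >= NUM_CRTC)           /* reject invalid CRTC index */
                    return -1;
            *reg = base + crtc_offsets[type];
            return 0;
    }

    int main(void)
    {
            unsigned reg;

            if (reg_for_crtc(1, 0x1000, &reg) == 0)
                    printf("reg = 0x%04x\n", reg);   /* 0x1200 */
            return 0;
    }
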
@@ -3282,7 +3303,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3291,33 +3311,15 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if(amdgpu_crtc == NULL)
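
For reference, the crtc_id derivation at the top of the handler comes straight from the interrupt vector layout implied by the code: pageflip sources start at src_id 8 and each CRTC owns two consecutive IDs, so (src_id - 8) >> 1 recovers the CRTC index. A quick check of the mapping:

    #include <stdio.h>

    int main(void)
    {
            unsigned src_id;

            for (src_id = 8; src_id <= 13; src_id++)
                    printf("src_id %u -> crtc %u\n",
                           src_id, (src_id - 8) >> 1);
            /* 8,9 -> 0;  10,11 -> 1;  12,13 -> 2 */
            return 0;
    }
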
index c86911c2ea2a896f414473f798d782e9c08518cf..d784fb43efc2d4c54f1d653a09b6fccdb2ddd958 100644 (file)
@@ -2411,26 +2411,19 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
                   CUR_CONTROL__CURSOR_EN_MASK |
                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
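
Programming the cursor surface address from amdgpu_crtc->cursor_addr inside show_cursor() means the address is reapplied every time the cursor is shown, which is what lets cursor_reset() restore it after a modeset. The 64-bit GPU address is split across two 32-bit registers; note the patch also switches the low half from the open-coded gpu_addr & 0xffffffff to lower_32_bits(). Stand-ins for the kernel's upper_32_bits()/lower_32_bits() helpers, runnable on their own:

    #include <stdint.h>
    #include <stdio.h>

    #define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))
    #define lower_32_bits(n) ((uint32_t)(uint64_t)(n))

    int main(void)
    {
            uint64_t cursor_addr = 0x0000000f12345678ULL; /* made-up address */

            printf("HIGH=0x%08x LOW=0x%08x\n",
                   upper_32_bits(cursor_addr), lower_32_bits(cursor_addr));
            return 0;
    }
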
 
-static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              gpu_addr & 0xffffffff);
-}
-
-static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
+                                      int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2450,26 +2443,40 @@ static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v8_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v8_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       int ret;
+
+       dce_v8_0_lock_cursor(crtc, true);
+       ret = dce_v8_0_cursor_move_locked(crtc, x, y);
+       dce_v8_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                    struct drm_file *file_priv,
+                                    uint32_t handle,
+                                    uint32_t width,
+                                    uint32_t height,
+                                    int32_t hot_x,
+                                    int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2491,41 +2498,71 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v8_0_lock_cursor(crtc, true);
-       dce_v8_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v8_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v8_0_show_cursor(crtc);
        dce_v8_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v8_0_lock_cursor(crtc, true);
+
+               dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                           amdgpu_crtc->cursor_y);
+
+               dce_v8_0_show_cursor(crtc);
+
+               dce_v8_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2553,7 +2590,7 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
-       .cursor_set = dce_v8_0_crtc_cursor_set,
+       .cursor_set2 = dce_v8_0_crtc_cursor_set2,
        .cursor_move = dce_v8_0_crtc_cursor_move,
        .gamma_set = dce_v8_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2693,6 +2730,7 @@ static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v8_0_cursor_reset(crtc);
        /* update the hw version for dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3274,37 +3312,20 @@ static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
                                                 unsigned type,
                                                 enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
@@ -3313,7 +3334,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3322,33 +3342,15 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if (amdgpu_crtc == NULL)
index cb4f68f53f248ab58cdb00100dc0da5d6f81da9d..718250ae9856b144c86d6f0ad5db5246ca16c737 100644 (file)
@@ -903,6 +903,191 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
        return 0;
 }
 
+static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
+{
+       u32 gb_addr_config;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+       u32 tmp;
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_cu_per_sh = 6;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_FIJI:
+               adev->gfx.config.max_shader_engines = 4;
+               adev->gfx.config.max_tile_pipes = 16;
+               adev->gfx.config.max_cu_per_sh = 16;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_TONGA:
+               adev->gfx.config.max_shader_engines = 4;
+               adev->gfx.config.max_tile_pipes = 8;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_CARRIZO:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+
+               switch (adev->pdev->revision) {
+               case 0xc4:
+               case 0x84:
+               case 0xc8:
+               case 0xcc:
+                       /* B10 */
+                       adev->gfx.config.max_cu_per_sh = 8;
+                       break;
+               case 0xc5:
+               case 0x81:
+               case 0x85:
+               case 0xc9:
+               case 0xcd:
+                       /* B8 */
+                       adev->gfx.config.max_cu_per_sh = 6;
+                       break;
+               case 0xc6:
+               case 0xca:
+               case 0xce:
+                       /* B6 */
+                       adev->gfx.config.max_cu_per_sh = 6;
+                       break;
+               case 0xc7:
+               case 0x87:
+               case 0xcb:
+               default:
+                       /* B4 */
+                       adev->gfx.config.max_cu_per_sh = 4;
+                       break;
+               }
+
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       default:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 2;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       }
+
+       mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+       adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+       mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+       adev->gfx.config.mem_max_burst_length_bytes = 256;
+       if (adev->flags & AMD_IS_APU) {
+               /* Get memory bank mapping mode. */
+               tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+               dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+               dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               /* Validate settings in case only one DIMM installed. */
+               if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+                       dimm00_addr_map = 0;
+               if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+                       dimm01_addr_map = 0;
+               if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+                       dimm10_addr_map = 0;
+               if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+                       dimm11_addr_map = 0;
+
+               /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
+               /* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
+               if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+                       adev->gfx.config.mem_row_size_in_kb = 2;
+               else
+                       adev->gfx.config.mem_row_size_in_kb = 1;
+       } else {
+               tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
+               adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+               if (adev->gfx.config.mem_row_size_in_kb > 4)
+                       adev->gfx.config.mem_row_size_in_kb = 4;
+       }
+
+       adev->gfx.config.shader_engine_tile_size = 32;
+       adev->gfx.config.num_gpus = 1;
+       adev->gfx.config.multi_gpu_tile_size = 64;
+
+       /* fix up row size */
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+       default:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
+               break;
+       case 2:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
+               break;
+       case 4:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
+               break;
+       }
+       adev->gfx.config.gb_addr_config = gb_addr_config;
+}
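
One detail worth replaying from the non-APU branch above: the memory row size is derived from the NOOFCOLS field of MC_ARB_RAMCFG as (4 * (1 << (8 + tmp))) / 1024 KB and clamped to 4 KB, and the result later selects the GB_ADDR_CONFIG ROW_SIZE encoding (1 KB -> 0, 2 KB -> 1, 4 KB -> 2). The same computation, standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned tmp;

            for (tmp = 0; tmp <= 3; tmp++) {
                    unsigned row_kb = (4 * (1u << (8 + tmp))) / 1024;

                    if (row_kb > 4)
                            row_kb = 4;   /* clamp, as in the driver */
                    printf("NOOFCOLS=%u -> mem_row_size_in_kb=%u\n",
                           tmp, row_kb);
            }
            return 0;
    }
    /* Output: 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 4 (clamped) */
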
+
 static int gfx_v8_0_sw_init(void *handle)
 {
        int i, r;
@@ -1010,6 +1195,8 @@ static int gfx_v8_0_sw_init(void *handle)
 
        adev->gfx.ce_ram_size = 0x8000;
 
+       gfx_v8_0_gpu_early_init(adev);
+
        return 0;
 }
 
@@ -2043,203 +2230,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 
 static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 {
-       u32 gb_addr_config;
-       u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
        u32 tmp;
        int i;
 
-       switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               adev->gfx.config.max_shader_engines = 1;
-               adev->gfx.config.max_tile_pipes = 2;
-               adev->gfx.config.max_cu_per_sh = 6;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 2;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_FIJI:
-               adev->gfx.config.max_shader_engines = 4;
-               adev->gfx.config.max_tile_pipes = 16;
-               adev->gfx.config.max_cu_per_sh = 16;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 4;
-               adev->gfx.config.max_texture_channel_caches = 8;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_TONGA:
-               adev->gfx.config.max_shader_engines = 4;
-               adev->gfx.config.max_tile_pipes = 8;
-               adev->gfx.config.max_cu_per_sh = 8;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 8;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_CARRIZO:
-               adev->gfx.config.max_shader_engines = 1;
-               adev->gfx.config.max_tile_pipes = 2;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-
-               switch (adev->pdev->revision) {
-               case 0xc4:
-               case 0x84:
-               case 0xc8:
-               case 0xcc:
-                       /* B10 */
-                       adev->gfx.config.max_cu_per_sh = 8;
-                       break;
-               case 0xc5:
-               case 0x81:
-               case 0x85:
-               case 0xc9:
-               case 0xcd:
-                       /* B8 */
-                       adev->gfx.config.max_cu_per_sh = 6;
-                       break;
-               case 0xc6:
-               case 0xca:
-               case 0xce:
-                       /* B6 */
-                       adev->gfx.config.max_cu_per_sh = 6;
-                       break;
-               case 0xc7:
-               case 0x87:
-               case 0xcb:
-               default:
-                       /* B4 */
-                       adev->gfx.config.max_cu_per_sh = 4;
-                       break;
-               }
-
-               adev->gfx.config.max_texture_channel_caches = 2;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       default:
-               adev->gfx.config.max_shader_engines = 2;
-               adev->gfx.config.max_tile_pipes = 4;
-               adev->gfx.config.max_cu_per_sh = 2;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 4;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       }
-
        tmp = RREG32(mmGRBM_CNTL);
        tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
        WREG32(mmGRBM_CNTL, tmp);
 
-       mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
-       adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
-       mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
-
-       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
-       adev->gfx.config.mem_max_burst_length_bytes = 256;
-       if (adev->flags & AMD_IS_APU) {
-               /* Get memory bank mapping mode. */
-               tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
-               dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-               dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-               tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
-               dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-               dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-               /* Validate settings in case only one DIMM installed. */
-               if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
-                       dimm00_addr_map = 0;
-               if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
-                       dimm01_addr_map = 0;
-               if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
-                       dimm10_addr_map = 0;
-               if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
-                       dimm11_addr_map = 0;
-
-               /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
-               /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
-               if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
-                       adev->gfx.config.mem_row_size_in_kb = 2;
-               else
-                       adev->gfx.config.mem_row_size_in_kb = 1;
-       } else {
-               tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
-               adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
-               if (adev->gfx.config.mem_row_size_in_kb > 4)
-                       adev->gfx.config.mem_row_size_in_kb = 4;
-       }
-
-       adev->gfx.config.shader_engine_tile_size = 32;
-       adev->gfx.config.num_gpus = 1;
-       adev->gfx.config.multi_gpu_tile_size = 64;
-
-       /* fix up row size */
-       switch (adev->gfx.config.mem_row_size_in_kb) {
-       case 1:
-       default:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
-               break;
-       case 2:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
-               break;
-       case 4:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
-               break;
-       }
-       adev->gfx.config.gb_addr_config = gb_addr_config;
-
-       WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+       WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
        WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
-              gb_addr_config & 0x70);
+              adev->gfx.config.gb_addr_config & 0x70);
        WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
-              gb_addr_config & 0x70);
-       WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+              adev->gfx.config.gb_addr_config & 0x70);
+       WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 
        gfx_v8_0_tiling_mode_table_init(adev);
 
@@ -2256,13 +2263,13 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                if (i == 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
-                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
-                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                }
index 774528ab8704387f00525618b1c48578387b70df..488348272c4d1a0e5a9d93d80277abdf5407039e 100644 (file)
@@ -435,6 +435,33 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * gmc_v7_0_set_fault_enable_default - update VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(mmVM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(mmVM_CONTEXT1_CNTL, tmp);
+}
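
The new helper is a single read-modify-write of VM_CONTEXT1_CNTL that flips all of the *_PROTECTION_FAULT_ENABLE_DEFAULT fields together; per the kerneldoc above, value == true redirects the faulting access to the default page. REG_SET_FIELD() is the driver's field-update macro; the underlying idiom, with a made-up mask/shift pair for illustration:

    #include <stdint.h>

    /* Hypothetical field definition, for illustration only. */
    #define RANGE_FAULT_ENABLE_MASK  0x00000010u
    #define RANGE_FAULT_ENABLE_SHIFT 4

    /* Read-modify-write of one bitfield, as REG_SET_FIELD() does. */
    static uint32_t set_field(uint32_t reg, uint32_t mask,
                              unsigned shift, uint32_t value)
    {
            return (reg & ~mask) | ((value << shift) & mask);
    }

    /* Usage:
     *   tmp = set_field(tmp, RANGE_FAULT_ENABLE_MASK,
     *                   RANGE_FAULT_ENABLE_SHIFT, enable ? 1 : 0);
     */
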
+
 /**
  * gmc_v7_0_gart_enable - gart enable
  *
@@ -523,15 +550,13 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v7_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v7_0_set_fault_enable_default(adev, true);
 
        if (adev->asic_type == CHIP_KAVERI) {
                tmp = RREG32(mmCHUB_CONTROL);
@@ -1262,6 +1287,15 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v7_0_set_fault_enable_default(adev, false);
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1303,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
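
The interrupt-handler reorder above changes three things: the fault source is acknowledged (VM_CONTEXT1_CNTL2 bit 0) before any reporting, the handler bails out early when both the fault address and status read back as zero (a spurious or already-handled interrupt), and with amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST the default-page redirection is switched off after the first observed fault. The resulting control flow, with stubbed helpers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { FAULT_STOP_NEVER, FAULT_STOP_FIRST, FAULT_STOP_ALWAYS };

    static void ack_fault(void) { /* write VM_CONTEXT1_CNTL2 bit 0 (stub) */ }
    static void set_fault_enable_default(bool v) { (void)v; /* stub */ }

    static int process_fault(uint32_t addr, uint32_t status, int stop_mode)
    {
            ack_fault();                    /* clear the source first */

            if (!addr && !status)           /* nothing latched: spurious IRQ */
                    return 0;

            if (stop_mode == FAULT_STOP_FIRST)
                    set_fault_enable_default(false);

            fprintf(stderr, "GPU fault: addr 0x%08x status 0x%08x\n",
                    addr, status);
            return 0;
    }
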
index 9a07742620d0361ad054930ff3b07c75c9bbcf2c..42b5ff827055146017b707c59fca8ac3d9fc9b7a 100644 (file)
@@ -549,6 +549,35 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * gmc_v8_0_set_fault_enable_default - update VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(mmVM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(mmVM_CONTEXT1_CNTL, tmp);
+}
+
 /**
  * gmc_v8_0_gart_enable - gart enable
  *
@@ -663,6 +692,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v8_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v8_0_set_fault_enable_default(adev, true);
 
        gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1262,6 +1295,15 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v8_0_set_fault_enable_default(adev, false);
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1311,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
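
gmc_v8_0 mirrors the gmc_v7_0 changes, with one extra field: VI adds EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT alongside the read/write bits. The gart_enable hook then derives the initial policy from the amdgpu_vm_fault_stop module parameter introduced by this series; a sketch of that mapping (the 0/1/2 encoding is an assumption, only the ALWAYS/FIRST names appear in the diff):

    #include <stdbool.h>

    enum {
            VM_FAULT_STOP_NEVER  = 0,   /* assumed encoding */
            VM_FAULT_STOP_FIRST  = 1,
            VM_FAULT_STOP_ALWAYS = 2,
    };

    static void set_fault_enable_default(bool v) { (void)v; /* stub */ }

    static void apply_fault_policy(int vm_fault_stop)
    {
            /*
             * ALWAYS: never redirect to the default page, so every fault
             * stops the offender. Otherwise start with redirection on;
             * STOP_FIRST flips it off in the fault handler later.
             */
            set_fault_enable_default(vm_fault_stop != VM_FAULT_STOP_ALWAYS);
    }
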
index 14e87234171aeacb9cbbbedafa358e22825b5526..f8b868c7c4960f80f6f33e472becb1aeaff9cafb 100644 (file)
@@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
@@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-                       info->fw = adev->sdma[i].fw;
+                       info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -164,9 +164,9 @@ out:
                printk(KERN_ERR
                       "sdma_v2_4: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
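
The adev->sdma[i] to adev->sdma.instance[i] conversion throughout this file reflects a reworked container for SDMA state. A plausible shape, reconstructed purely from the accesses visible in this patch (member order, the placeholder bodies, and anything not referenced here are assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    struct firmware;                      /* opaque, <linux/firmware.h> */
    struct amdgpu_ring { int dummy; };    /* placeholder */
    struct amdgpu_irq_src { int dummy; }; /* placeholder */

    #define SDMA_MAX_INSTANCE 2           /* sdma0 and sdma1 */

    struct amdgpu_sdma_instance {
            const struct firmware *fw;
            uint32_t fw_version;
            uint32_t feature_version;
            bool burst_nop;
            struct amdgpu_ring ring;
    };

    struct amdgpu_sdma {
            struct amdgpu_sdma_instance instance[SDMA_MAX_INSTANCE];
            struct amdgpu_irq_src trap_irq;
            struct amdgpu_irq_src illegal_inst_irq;
            int num_instances;
    };
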
@@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
        u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 
        return wptr;
@@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
@@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
                sdma_v2_4_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
        u32 wb_offset;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;
-       bool smc_loads_fw = false; /* XXX fix me */
-
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
 
        /* halt the MEs */
        sdma_v2_4_enable(adev, false);
 
-       if (smc_loads_fw) {
-               /* XXX query SMC for fw load complete */
-       } else {
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-                       amdgpu_ucode_print_sdma_hdr(&hdr->header);
-                       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-                       fw_data = (const __le32 *)
-                               (adev->sdma[i].fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-                       for (j = 0; j < fw_size; j++)
-                               WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
-               }
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               amdgpu_ucode_print_sdma_hdr(&hdr->header);
+               fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+               fw_data = (const __le32 *)
+                       (adev->sdma.instance[i].fw->data +
+                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+               for (j = 0; j < fw_size; j++)
+                       WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
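
The upload loop itself is the usual two-register microcode pattern: point SDMA0_UCODE_ADDR at offset 0, stream the payload dwords through SDMA0_UCODE_DATA (the address presumably auto-increments on each data write), then write the firmware version back to the address register, as the driver does. Size and payload offset come from the sdma firmware header. A stub-backed sketch:

    #include <stddef.h>
    #include <stdint.h>

    static void wreg32(unsigned reg, uint32_t val) { (void)reg; (void)val; }

    #define UCODE_ADDR 0x0  /* stand-ins for mmSDMA0_UCODE_ADDR/_DATA */
    #define UCODE_DATA 0x4

    static void upload_ucode(const uint32_t *fw_data, size_t fw_dwords,
                             uint32_t fw_version)
    {
            size_t j;

            wreg32(UCODE_ADDR, 0);                  /* rewind ucode RAM */
            for (j = 0; j < fw_dwords; j++)
                    wreg32(UCODE_DATA, fw_data[j]); /* stream payload */
            wreg32(UCODE_ADDR, fw_version);         /* as the driver does */
    }
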
@@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
        sdma_v2_4_set_vm_pte_funcs(adev);
@@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
 static int sdma_v2_4_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
@@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
                return r;
        }
 
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = false;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = false;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
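
Folding the two copy-pasted ring setups into one loop leaves only two per-instance differences: the ring name and the trap IRQ type. The (i == 0) ternary is sufficient because VI parts carry at most two SDMA engines; if the trap enum values are consecutive (an assumption this patch does not guarantee), the selection could be written arithmetically:

        /* assumes AMDGPU_SDMA_IRQ_TRAP1 == AMDGPU_SDMA_IRQ_TRAP0 + 1 */
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                             &adev->sdma.trap_irq,
                             AMDGPU_SDMA_IRQ_TRAP0 + i,
                             AMDGPU_RING_TYPE_SDMA);
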
@@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
 static int sdma_v2_4_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
        dev_info(adev->dev, "VI SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1345,8 +1331,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
-       adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@@ -1360,9 +1348,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
 
 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1428,7 +1416,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1443,7 +1431,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
index 9bfe92df15f712b86f45e674a8c995f10812ed0f..670555a45da91b052446bb99eba4599a082efa46 100644 (file)
@@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
@@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-                       info->fw = adev->sdma[i].fw;
+                       info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +235,9 @@ out:
                printk(KERN_ERR
                       "sdma_v3_0: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
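
The error path keeps the usual request_firmware() discipline: on any failure, every instance is torn down symmetrically, which is safe even for instances whose request never ran, because release_firmware() treats a NULL pointer as a no-op. NULLing the pointer afterwards also keeps later consumers such as sdma_v3_0_load_microcode(), which test instance[i].fw, from seeing a stale handle:

        /* NULL-safe teardown: fine even if request_firmware() never ran */
        release_firmware(adev->sdma.instance[i].fw);
        adev->sdma.instance[i].fw = NULL;
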
@@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
                /* XXX check if swapping is necessary on BE */
                wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
        } else {
-               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
                wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
        }
@@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
                adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
                WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
        } else {
-               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
        }
@@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
@@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
                sdma_v3_0_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
        u32 doorbell;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
        u32 fw_size;
        int i, j;
 
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
-
        /* halt the MEs */
        sdma_v3_0_enable(adev, false);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                fw_data = (const __le32 *)
-                       (adev->sdma[i].fw->data +
+                       (adev->sdma.instance[i].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
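
SDMA firmware images are stored little-endian, so the upload loop converts each word with le32_to_cpup() before writing it to the UCODE_DATA register; on little-endian hosts the conversion compiles away, on big-endian hosts it performs the byte swap. The sequence, annotated (names as in the diff; the reading of the final ADDR write is my interpretation):

        WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);   /* rewind ucode write pointer */
        for (j = 0; j < fw_size; j++)                      /* fw_size counted in dwords  */
                WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i],
                       le32_to_cpup(fw_data++));           /* LE image -> CPU order      */
        WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i],
               adev->sdma.instance[i].fw_version);         /* stash fw version, readable
                                                              back for debugging         */
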
@@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
-       int r;
+       int r, i;
 
        if (!adev->firmware.smu_load) {
                r = sdma_v3_0_load_microcode(adev);
                if (r)
                        return r;
        } else {
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA0);
-               if (r)
-                       return -EINVAL;
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA1);
-               if (r)
-                       return -EINVAL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                                        (i == 0) ?
+                                                                        AMDGPU_UCODE_ID_SDMA0 :
+                                                                        AMDGPU_UCODE_ID_SDMA1);
+                       if (r)
+                               return -EINVAL;
+               }
        }
 
        /* unhalt the MEs */
@@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       switch (adev->asic_type) {
+       default:
+               adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+               break;
+       }
+
        sdma_v3_0_set_ring_funcs(adev);
        sdma_v3_0_set_buffer_funcs(adev);
        sdma_v3_0_set_vm_pte_funcs(adev);
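
The switch on adev->asic_type has only a default case for now, but it gives later ASICs a natural place to declare a different engine count instead of reintroducing SDMA_MAX_INSTANCE assumptions elsewhere. A hypothetical future entry (CHIP_EXAMPLE is a placeholder, not a real enum value):

        switch (adev->asic_type) {
        case CHIP_EXAMPLE:                      /* hypothetical single-SDMA part */
                adev->sdma.num_instances = 1;
                break;
        default:
                adev->sdma.num_instances = SDMA_MAX_INSTANCE;
                break;
        }
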
@@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
 static int sdma_v3_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
@@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
                return r;
        }
 
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = true;
-       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = true;
-       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = true;
+               ring->doorbell_index = (i == 0) ?
+                       AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
 static int sdma_v3_0_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
        dev_info(adev->dev, "VI SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
-       adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
 
 static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
index 3697eeeecf82a75ef1a5b0ee44e6b4d572841053..7fa1d7a438e9c71ba76477f76d3f2d5379ca5955 100644 (file)
@@ -327,19 +327,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
+       unsigned long flags;
 
        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
+       if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+               cancel_delayed_work_sync(&s_fence->dwork);
+               spin_lock_irqsave(&sched->fence_list_lock, flags);
+               list_del_init(&s_fence->list);
+               spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+       }
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static void amd_sched_fence_work_func(struct work_struct *work)
+{
+       struct amd_sched_fence *s_fence =
+               container_of(work, struct amd_sched_fence, dwork.work);
+       struct amd_gpu_scheduler *sched = s_fence->sched;
+       struct amd_sched_fence *entity, *tmp;
+       unsigned long flags;
+
+       DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
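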
+
+       /* Clean all pending fences */
+       spin_lock_irqsave(&sched->fence_list_lock, flags);
+       list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
+               DRM_ERROR("  fence no %d\n", entity->base.seqno);
+               cancel_delayed_work(&entity->dwork);
+               list_del_init(&entity->list);
+               fence_put(&entity->base);
+       }
+       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+}
+
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;
 
+       spin_lock_init(&sched->fence_list_lock);
+       INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
@@ -347,6 +377,7 @@ static int amd_sched_main(void *param)
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
+               unsigned long flags;
 
                wait_event_interruptible(sched->wake_up_worker,
                        kthread_should_stop() ||
@@ -357,6 +388,15 @@ static int amd_sched_main(void *param)
 
                entity = sched_job->s_entity;
                s_fence = sched_job->s_fence;
+
+               if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+                       INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
+                       schedule_delayed_work(&s_fence->dwork, sched->timeout);
+                       spin_lock_irqsave(&sched->fence_list_lock, flags);
+                       list_add_tail(&s_fence->list, &sched->fence_list);
+                       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+               }
+
                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                if (fence) {
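
Each job now arms a per-fence delayed work before being handed to the hardware; normal completion (amd_sched_process_job(), earlier in this file) disarms it with cancel_delayed_work_sync() and unlinks the fence under fence_list_lock. Note the asymmetry: the timeout handler itself uses the non-blocking cancel_delayed_work() while walking the list, since the list can include the very fence whose handler is currently running, and a work item that sync-cancelled itself would deadlock waiting for its own completion. The pairing, in sketch form:

        /* submit path: arm the watchdog */
        INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
        schedule_delayed_work(&s_fence->dwork, sched->timeout);

        /* completion path: disarm, then unlink under the list lock */
        cancel_delayed_work_sync(&s_fence->dwork);
        spin_lock_irqsave(&sched->fence_list_lock, flags);
        list_del_init(&s_fence->list);
        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
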
@@ -392,11 +432,12 @@ static int amd_sched_main(void *param)
 */
 int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
-                  unsigned hw_submission, const char *name)
+                  unsigned hw_submission, long timeout, const char *name)
 {
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
+       sched->timeout = timeout;
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);
 
index 80b64dc2221417938347e8ac463a6d3d676b9f0d..929e9aced04195e24ed5ddcae99ab385ce830898 100644 (file)
@@ -68,6 +68,8 @@ struct amd_sched_fence {
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
+       struct delayed_work             dwork;
+       struct list_head                list;
 };
 
 struct amd_sched_job {
@@ -103,18 +105,21 @@ struct amd_sched_backend_ops {
 struct amd_gpu_scheduler {
        struct amd_sched_backend_ops    *ops;
        uint32_t                        hw_submission_limit;
+       long                            timeout;
        const char                      *name;
        struct amd_sched_rq             sched_rq;
        struct amd_sched_rq             kernel_rq;
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
+       struct list_head                fence_list;
+       spinlock_t                      fence_list_lock;
        struct task_struct              *thread;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
-                  uint32_t hw_submission, const char *name);
+                  uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
index c9e0fbbf76a3cb8f29c6b41a0beea3554c832551..46f87d4aaf31fef4ae90d50321aee3f67439031f 100644 (file)
@@ -34,6 +34,8 @@
 #define MAX(a,b)                   (((a)>(b))?(a):(b))
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
+#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
+
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
                           struct radeon_bo_list **cs_reloc);
 struct evergreen_cs_track {
@@ -84,6 +86,7 @@ struct evergreen_cs_track {
        u32                     htile_surface;
        struct radeon_bo        *htile_bo;
        unsigned long           indirect_draw_buffer_size;
+       const unsigned          *reg_safe_bm;
 };
 
 static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -444,7 +447,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
                 * command stream.
                 */
                if (!surf.mode) {
-                       volatile u32 *ib = p->ib.ptr;
+                       uint32_t *ib = p->ib.ptr;
                        unsigned long tmp, nby, bsize, size, min = 0;
 
                        /* find the height the ddx wants */
@@ -1083,41 +1086,18 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 }
 
 /**
- * evergreen_cs_check_reg() - check if register is authorized or not
+ * evergreen_cs_handle_reg() - process registers that need special handling.
  * @parser: parser structure holding parsing context
  * @reg: register we are testing
  * @idx: index into the cs buffer
- *
- * This function will test against evergreen_reg_safe_bm and return 0
- * if register is safe. If register is not flag as safe this function
- * will test it against a list of register needind special handling.
  */
-static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
        struct radeon_bo_list *reloc;
-       u32 last_reg;
-       u32 m, i, tmp, *ib;
+       u32 tmp, *ib;
        int r;
 
-       if (p->rdev->family >= CHIP_CAYMAN)
-               last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-       else
-               last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
-
-       i = (reg >> 7);
-       if (i >= last_reg) {
-               dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
-               return -EINVAL;
-       }
-       m = 1 << ((reg >> 2) & 31);
-       if (p->rdev->family >= CHIP_CAYMAN) {
-               if (!(cayman_reg_safe_bm[i] & m))
-                       return 0;
-       } else {
-               if (!(evergreen_reg_safe_bm[i] & m))
-                       return 0;
-       }
        ib = p->ib.ptr;
        switch (reg) {
        /* force following reg to 0 in an attempt to disable out buffer
@@ -1764,29 +1744,27 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        return 0;
 }
 
-static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+/**
+ * evergreen_is_safe_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ *
+ * This function tests the register against reg_safe_bm and returns
+ * true if the register is safe, or false otherwise.
+ */
+static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
 {
-       u32 last_reg, m, i;
-
-       if (p->rdev->family >= CHIP_CAYMAN)
-               last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-       else
-               last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+       struct evergreen_cs_track *track = p->track;
+       u32 m, i;
 
        i = (reg >> 7);
-       if (i >= last_reg) {
-               dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+       if (unlikely(i >= REG_SAFE_BM_SIZE)) {
                return false;
        }
        m = 1 << ((reg >> 2) & 31);
-       if (p->rdev->family >= CHIP_CAYMAN) {
-               if (!(cayman_reg_safe_bm[i] & m))
-                       return true;
-       } else {
-               if (!(evergreen_reg_safe_bm[i] & m))
-                       return true;
-       }
-       dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+       if (!(track->reg_safe_bm[i] & m))
+               return true;
+
        return false;
 }
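
The bitmap packs one bit per 32-bit register: register offsets are byte addresses of dword registers, so reg >> 2 is the register index, the low five bits of that pick a bit, and reg >> 7 picks the bitmap word (each word covers 128 bytes of register space). Worked through for an arbitrary, illustrative offset:

        reg  = 0x8040;
        i    = reg >> 7;               /* 0x8040 >> 7 = 0x100: bitmap word 256 */
        m    = 1 << ((reg >> 2) & 31); /* (0x2010) & 31 = 16: test bit 16      */
        safe = !(track->reg_safe_bm[i] & m); /* clear bit => register is safe  */
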
 
@@ -1795,7 +1773,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 {
        struct radeon_bo_list *reloc;
        struct evergreen_cs_track *track;
-       volatile u32 *ib;
+       uint32_t *ib;
        unsigned idx;
        unsigned i;
        unsigned start_reg, end_reg, reg;
@@ -2321,9 +2299,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
                        return -EINVAL;
                }
-               for (i = 0; i < pkt->count; i++) {
-                       reg = start_reg + (4 * i);
-                       r = evergreen_cs_check_reg(p, reg, idx+1+i);
+               for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+                       if (evergreen_is_safe_reg(p, reg))
+                               continue;
+                       r = evergreen_cs_handle_reg(p, reg, idx);
                        if (r)
                                return r;
                }
@@ -2337,9 +2316,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
                        return -EINVAL;
                }
-               for (i = 0; i < pkt->count; i++) {
-                       reg = start_reg + (4 * i);
-                       r = evergreen_cs_check_reg(p, reg, idx+1+i);
+               for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+                       if (evergreen_is_safe_reg(p, reg))
+                               continue;
+                       r = evergreen_cs_handle_reg(p, reg, idx);
                        if (r)
                                return r;
                }
@@ -2594,8 +2574,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                } else {
                        /* SRC is a reg. */
                        reg = radeon_get_ib_value(p, idx+1) << 2;
-                       if (!evergreen_is_safe_reg(p, reg, idx+1))
+                       if (!evergreen_is_safe_reg(p, reg)) {
+                               dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+                                        reg, idx + 1);
                                return -EINVAL;
+                       }
                }
                if (idx_value & 0x2) {
                        u64 offset;
@@ -2618,8 +2601,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                } else {
                        /* DST is a reg. */
                        reg = radeon_get_ib_value(p, idx+3) << 2;
-                       if (!evergreen_is_safe_reg(p, reg, idx+3))
+                       if (!evergreen_is_safe_reg(p, reg)) {
+                               dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+                                        reg, idx + 3);
                                return -EINVAL;
+                       }
                }
                break;
        case PACKET3_NOP:
@@ -2644,11 +2630,15 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               if (p->rdev->family >= CHIP_CAYMAN)
+               if (p->rdev->family >= CHIP_CAYMAN) {
                        tmp = p->rdev->config.cayman.tile_config;
-               else
+                       track->reg_safe_bm = cayman_reg_safe_bm;
+               } else {
                        tmp = p->rdev->config.evergreen.tile_config;
-
+                       track->reg_safe_bm = evergreen_reg_safe_bm;
+               }
+               BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
+               BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
                switch (tmp & 0xf) {
                case 0:
                        track->npipes = 1;
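
Because the Cayman/Evergreen decision is now made once, by stashing a table pointer in the track struct, the single REG_SAFE_BM_SIZE bound must hold for both tables; the BUILD_BUG_ON() pair in the hunk above turns any future size divergence into a compile error rather than an out-of-bounds read. BUILD_BUG_ON is the stock kernel compile-time assertion (from <linux/bug.h> in kernels of this vintage); a generic example:

        /* fails the build if the condition is true at compile time */
        BUILD_BUG_ON(sizeof(struct evergreen_cs_track) > PAGE_SIZE); /* illustrative bound */
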
@@ -2757,7 +2747,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
        u32 header, cmd, count, sub_cmd;
-       volatile u32 *ib = p->ib.ptr;
+       uint32_t *ib = p->ib.ptr;
        u32 idx;
        u64 src_offset, dst_offset, dst2_offset;
        int r;
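
Dropping volatile from the IB pointer (here and in the two earlier hunks) lets the compiler merge and reorder the stores that build the indirect buffer. That is safe because the CPU finishes writing the IB before it is handed to the GPU by the ring submission that follows, which provides the ordering that matters; volatile was only forcing one store per write without buying any correctness. An illustration of what the qualifier was costing:

        uint32_t *ib = p->ib.ptr;
        ib[idx + 0] = header;   /* without volatile, the compiler may merge    */
        ib[idx + 1] = lower_32; /* these into one wider store or reorder them  */
        ib[idx + 2] = upper_32; /* -- fine, the GPU reads only after submit    */
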
index 77e9d07c55b6701cbdb0a55bcbc9b68954e5c0d7..59acd0e5c2c6384705fdda0a6b89e65be5f5abc4 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
index f2421bc3e901904d8fbe92deffb9e98d033de62b..1d4d4520a0ac27a6b4944a9d7abcfdb4f209ec07 100644 (file)
@@ -31,7 +31,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include <linux/vgaarb.h>
-#include <linux/vga_switcheroo.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
index 8bc7d0bbd3c80a94529cea79bf4f078d7951a87a..a771b9f0bf98d60e51f1c15f3f0b69f7832b08ca 100644 (file)
@@ -535,7 +535,7 @@ static bool radeon_atpx_detect(void)
 
        if (has_atpx && vga_count == 2) {
                acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+               printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
                return true;
index d27e4ccb848c9c60e8a14f71336b790b125d2231..21b6732425c50d4b368d6b0fb016ea507a8aa6a1 100644 (file)
@@ -30,7 +30,6 @@
 #include "radeon.h"
 #include "atom.h"
 
-#include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 /*
index f3f562f6d848d17d3cc4e48d2701f5de5a14a46a..c566993a2ec3bcbe393ff14d079b0134f0426c7d 100644 (file)
@@ -1197,7 +1197,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
  * radeon_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
  *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after they are powered up using ACPI methods.
index fd9da282b29c818d16cb84f7b78c8f1e4163f383..977f1a5e11a52c90fb9f044a5b8a7e9266f642f4 100644 (file)
@@ -602,7 +602,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  *
  * @dev: drm dev pointer
  *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
  */
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {