/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
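
/* Userspace queries these through the MSM_GET_PARAM ioctl.  Note that
 * MSM_PARAM_CHIP_ID packs the core.major.minor.patchid revision into a
 * single word, e.g. an a320 at rev 3.2.0.2 reports 0x03020002:
 */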
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
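
/* The adreno_rbmemptrs struct lives in a small GPU-visible buffer
 * (memptrs_bo, allocated in adreno_gpu_init() below).  rbmemptr()
 * yields the iova of one member, so it can be handed to the CP for
 * rptr/fence writeback:
 */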
#define rbmemptr(adreno_gpu, member)  \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
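
/* One-time (and post-recovery) hardware setup.  The RB_CNTL size
 * fields are log2 values in units of quad-words (8 bytes), so with
 * RB_SIZE = 32K the ring is 4096 quad-words and BUFSZ is ilog2(4096),
 * i.e. 12:
 */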
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));

	/* Setup ringbuffer address: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
			rbmemptr(adreno_gpu, fence));

	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);

	return 0;
}
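
/* ring->cur and ring->start are uint32_t pointers, so this is the
 * write pointer offset in dwords, the unit the CP registers use:
 */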
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}
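
/* Called from the recover worker after hangcheck declares the GPU
 * stuck: fast-forward the completed fence past anything that was
 * in flight, then re-run hw_init to get the CP going again:
 */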
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr  = 0;
	adreno_gpu->memptrs->wptr  = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}
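
/* Write one submit into the ringbuffer: a CP_INDIRECT_BUFFER packet
 * per cmd buffer, then a CACHE_FLUSH_TS event that writes the fence
 * seqno into memptrs->fence, and a CP_INTERRUPT so the irq handler
 * can retire completed submits.  Note the intentional fall-through
 * from CTX_RESTORE_BUF to CMD_BUF when a ctx switch *has* happened:
 */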
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);

	return 0;
}
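
/* Kick the CP: the mb() orders the ringbuffer writes ahead of the
 * register write that makes them visible to the GPU:
 */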
void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}

void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* wait for CP to drain ringbuffer: */
	if (spin_until(adreno_gpu->memptrs->rptr == wptr))
		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));

	gpu->funcs->pm_resume(gpu);

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}

	gpu->funcs->pm_suspend(gpu);
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	printk("rptr:     %d\n", adreno_gpu->memptrs->rptr);
	printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
	printk("rb wptr:  %d\n", get_wptr(gpu->rb));

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
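
/* Free ringbuffer space, in dwords.  Reserving one word keeps a full
 * ring distinguishable from an empty one: with rptr == wptr (empty)
 * this returns size - 1, never size:
 */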
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = adreno_gpu->memptrs->rptr;
	return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	if (spin_until(ring_freewords(gpu) >= ndwords))
		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}

static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_mmu *mmu;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	gpu->fast_rate = config->fast_rate;
	gpu->slow_rate = config->slow_rate;
	gpu->bus_freq  = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif

	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	mmu = gpu->mmu;
	if (mmu) {
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
	if (!adreno_gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
			&adreno_gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
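
/* Undo adreno_gpu_init(), tolerating a partially constructed GPU
 * (release_firmware() is a no-op on NULL):
 */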
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}
	release_firmware(gpu->pm4);
	release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}