drivers/gpu/drm/msm/adreno/a5xx_gpu.c
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

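/*
 * Write a userspace submit into the ringbuffer: one CP_INDIRECT_BUFFER_PFE
 * packet per command buffer (context restore buffers are skipped when the
 * submitting context already owns the GPU state), then the fence seqno into
 * CP_SCRATCH_REG(2) (dumped on hang), and finally a CACHE_FLUSH_TS event
 * that writes the seqno to memory and raises CP_CACHE_FLUSH_TS so the
 * submit can be retired.
 */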
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned int i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
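			/* fall-thru */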
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->fence->seqno);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, submit->fence->seqno);

	gpu->funcs->flush(gpu);
}

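/*
 * Hardware clock gating (HWCG) is described as a flat list of register/value
 * pairs; the per-chip tables below are written out verbatim by
 * _a5xx_enable_hwcg().
 */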
struct a5xx_hwcg {
	u32 offset;
	u32 value;
};

static const struct a5xx_hwcg a530_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct {
	int (*test)(struct adreno_gpu *gpu);
	const struct a5xx_hwcg *regs;
	unsigned int count;
} a5xx_hwcg_regs[] = {
	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
};

static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
		const struct a5xx_hwcg *regs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		gpu_write(gpu, regs[i].offset, regs[i].value);

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}

static void a5xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
				a5xx_hwcg_regs[i].count);
			return;
		}
	}
}

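/*
 * Issue CP_ME_INIT to bring up the microengine: enable multiple hardware
 * contexts and error detection, select the A530 WFI workaround where needed,
 * then wait for the GPU to go idle.
 */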
static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);

	return gpu->funcs->idle(gpu) ? 0 : -EINVAL;
}

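/*
 * Allocate a GEM object and copy a microcode image into it, skipping the
 * first four bytes of the firmware file. Returns the buffer and, if
 * requested, its GPU iova.
 */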
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_device *drm = gpu->dev;
	struct drm_gem_object *bo;
	void *ptr;

	mutex_lock(&drm->struct_mutex);
	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);

	if (IS_ERR(bo))
		return bo;

	ptr = msm_gem_get_vaddr(bo);
	if (!ptr) {
		drm_gem_object_unreference_unlocked(bo);
		return ERR_PTR(-ENOMEM);
	}

	if (iova) {
		int ret = msm_gem_get_iova(bo, gpu->id, iova);

		if (ret) {
			drm_gem_object_unreference_unlocked(bo);
			return ERR_PTR(ret);
		}
	}

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);
	return bo;
}

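/*
 * Load the PM4 and PFP microcode into GEM buffers (once) and point the CP
 * instruction base registers at them.
 */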
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	int ret;

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
			&a5xx_gpu->pm4_iova);

		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
				ret);
			return ret;
		}
	}

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
			&a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
				ret);
			return ret;
		}
	}

	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

	return 0;
}

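/*
 * Interrupts unmasked at hw_init time: the error sources decoded below plus
 * CP_CACHE_FLUSH_TS, which drives fence retirement.
 */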
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	  A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

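/*
 * Per-boot (and post-recovery) hardware setup: VBIF/UCHE configuration,
 * fault detection, CP protected register ranges, HWCG, GPMU and CP microcode
 * load, then start the microengine and run the power init sequence.
 */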
static int a5xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
		 */

		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
			0xF0000000);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
			0xFFFFFFFF);
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
			0xFFFFFFFF);
	}

	/* Enable fault detection */
	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0xFFFF);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

	/* Increase VFD cache access so LRZ and other data gets evicted less */
	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

	/* Set the GMEM VA range (0 to gpu->gmem) */
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
		0x00100000 + adreno_gpu->gmem - 1);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);

	/* Enable USE_RETENTION_FLOPS */
	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

	/* Enable ME/PFP split notification */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

	/* Enable HWCG */
	a5xx_enable_hwcg(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

	/* Content protect */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

	/* CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

	/* RB */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

	/* VPC */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

	/* UCHE */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
			ADRENO_PROTECT_RW(0x10000, 0x8000));

	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Load the GPMU firmware before starting the HW init */
	a5xx_gpmu_ucode_init(gpu);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_ucode_init(gpu);
	if (ret)
		return ret;

	/* Disable the interrupts through the initial bringup stage */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

	/* Clear ME_HALT to start the micro engine */
	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
	ret = a5xx_me_init(gpu);
	if (ret)
		return ret;

	ret = a5xx_power_init(gpu);
	if (ret)
		return ret;

	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly
	 */
	if (adreno_is_a530(adreno_gpu)) {
		OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
		OUT_RING(gpu->rb, 0x0F);

		gpu->funcs->flush(gpu);
		if (!gpu->funcs->idle(gpu))
			return -EINVAL;
	}

	/* Put the GPU into unsecure mode */
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);

	return 0;
}

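/*
 * Hang recovery: dump the CP scratch registers (plus a full register dump
 * when hang_debug is set), pulse the RBBM software reset, then hand off to
 * the common adreno recovery path.
 */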
static void a5xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
	}

	if (hang_debug)
		a5xx_dump(gpu);

	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		if (a5xx_gpu->pfp_iova)
			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}

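/*
 * The GPU is considered idle when RBBM_STATUS reports no block busy other
 * than HI and no hang-detect interrupt is pending.
 */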
static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
		return false;

	/*
	 * Nearly every abnormality ends up pausing the GPU and triggering a
	 * fault so we can safely just watch for this one interrupt to fire
	 */
	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

static bool a5xx_idle(struct msm_gpu *gpu)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu))
		return false;

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));

		return false;
	}

	return true;
}

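/*
 * IOMMU pagefault handler: log the faulting iova along with CP scratch
 * registers 4-7 and report the fault back to the MMU layer.
 */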
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;
	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

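/*
 * Decode and log the individual CP error sources reported in
 * CP_INTERRUPT_STATUS (opcode, HW fault, DMA, protected mode and AHB errors).
 */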
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
		 * read it twice
		 */

		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

	if (status & A5XX_CP_INT_CP_DMA_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");

	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 24) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, val);
	}

	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
		const char *access[16] = { "reserved", "reserved",
			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
			"", "", "me read", "me write", "", "", "crashdump read",
			"crashdump write" };

		dev_err_ratelimited(gpu->dev->dev,
			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
			status & 0xFFFFF, access[(status >> 24) & 0xF],
			(status & (1 << 31)), status);
	}
}

static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
			(val >> 24) & 0xF);

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}

static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;

	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
		addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}

#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)

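/*
 * Top level interrupt handler: acknowledge everything that fired, dispatch
 * the error sources to their decoders and retire completed submits on
 * CP_CACHE_FLUSH_TS.
 */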
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);

	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A5XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};

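/*
 * Inclusive register ranges (start, end pairs, terminated by ~0) dumped by
 * the debugfs and hang-dump code.
 */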
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
	~0
};

static void a5xx_dump(struct msm_gpu *gpu)
{
	dev_info(gpu->dev->dev, "status:   %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	adreno_dump(gpu);
}

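/*
 * Power up the core, then bring up the RBCCU and SP power domains through
 * the GPMU, polling bit 20 of the respective PWR_CLK_STATUS register for the
 * GDSC to report enable.
 */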
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}

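/*
 * Halt and clear the VBIF, reset it to flush any stale FIFO entries, then
 * let the core suspend path power the GPU down.
 */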
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issue with FIFO
	 * entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	return msm_gpu_pm_suspend(gpu);
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
		REG_A5XX_RBBM_PERFCTR_CP_0_HI);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	gpu->funcs->pm_resume(gpu);

	seq_printf(m, "status:   %08x\n",
			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	gpu->funcs->pm_suspend(gpu);

	adreno_show(gpu, m);
}
#endif

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a5xx_hw_init,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.last_fence = adreno_last_fence,
		.submit = a5xx_submit,
		.flush = adreno_flush,
		.idle = a5xx_idle,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a5xx_show,
#endif
	},
	.get_timestamp = a5xx_get_timestamp,
};

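/*
 * Probe time constructor: allocate the a5xx_gpu wrapper, wire up the register
 * tables and the per-chip LM leakage constant, initialize the common adreno
 * layer and install the IOMMU fault handler.
 */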
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);
	}

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
	if (!a5xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	a5xx_gpu->pdev = pdev;
	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	a5xx_gpu->lm_leakage = 0x4E001A;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

	return gpu;
}