 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Authors: Dave Airlie
#include <linux/kernel.h>
#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32 cb_color_base_last[8];
	struct radeon_bo *cb_color_bo[8];
	u64 cb_color_bo_mc[8];
	u64 cb_color_bo_offset[8];
	struct radeon_bo *cb_color_frag_bo[8];
	u64 cb_color_frag_offset[8];
	struct radeon_bo *cb_color_tile_bo[8];
	u64 cb_color_tile_offset[8];
	u32 cb_color_size_idx[8]; /* unused */
	u32 cb_shader_mask; /* unused */
	u32 vgt_strmout_buffer_en;
	struct radeon_bo *vgt_strmout_bo[4];
	u64 vgt_strmout_bo_mc[4]; /* unused */
	u32 vgt_strmout_bo_offset[4];
	u32 vgt_strmout_size[4];
	u32 db_depth_size_idx;
	struct radeon_bo *db_bo;
	bool sx_misc_kill_all_prims;
	struct radeon_bo *htile_bo;
#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
	unsigned blockheight;
	unsigned valid_color;
	enum radeon_family min_family;
static const struct gpu_formats color_formats_table[] = {
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
	FMT_24_BIT(V_038004_FMT_8_8_8),
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
bool r600_fmt_is_valid_color(u32 format)
	if (format >= ARRAY_SIZE(color_formats_table))
	if (color_formats_table[format].valid_color)
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
	if (format >= ARRAY_SIZE(color_formats_table))
	if (family < color_formats_table[format].min_family)
	if (color_formats_table[format].blockwidth > 0)
int r600_fmt_get_blocksize(u32 format)
	if (format >= ARRAY_SIZE(color_formats_table))
	return color_formats_table[format].blocksize;
int r600_fmt_get_nblocksx(u32 format, u32 w)
	if (format >= ARRAY_SIZE(color_formats_table))
	bw = color_formats_table[format].blockwidth;
	return (w + bw - 1) / bw;
int r600_fmt_get_nblocksy(u32 format, u32 h)
	if (format >= ARRAY_SIZE(color_formats_table))
	bh = color_formats_table[format].blockheight;
	return (h + bh - 1) / bh;
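/* For example, with the BC1 entry above (4x4-texel blocks, 8 bytes each),
 * a 70x34 texture needs ceil(70/4) = 18 blocks across and ceil(34/4) = 9
 * blocks down, so one level occupies 18 * 9 * 8 = 1296 bytes.
 */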
struct array_mode_checker {
/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
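	/* a micro tile is tile_width x tile_height (8x8 pixels here), so
	 * tile_bytes is one micro tile's footprint for this block size and
	 * sample count; a macro tile spans nbanks x npipes micro tiles.
	 */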
	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*base_align = values->group_size;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*base_align = values->group_size;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				   (u32)((values->group_size * values->nbanks) /
					 (values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
static void r600_cs_track_init(struct r600_cs_track *track)
	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;
	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	/* When resolve is used, the second colorbuffer always has 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			 i, track->cb_color_info[i]);
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
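	/* SLICE_TILE_MAX is in units of 8x8 tiles (64 pixels each), hence the
	 * multiply above; dividing the slice size in pixels by the pitch then
	 * recovers the height.
	 */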
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX occasionally does bad things with the CB size:
			 * it rounds the height up too far for slice tile max, but the BO
			 * is smaller. r600c,g also seem to flush at bad times in some
			 * apps, resulting in bogus values here. So for linear just allow
			 * anything to avoid breaking */
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
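	/* re-emit CB_COLOR[n]_SIZE into the IB below, with slice_tile_max
	 * clamped to what the bound BO can actually hold.
	 */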
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* one 8x8 tile is 64 pixels and the FMASK size per pixel
			 * is in bits, so 64 / 8 bits-per-byte leaves the * 8 below. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
	case V_0280A0_CLEAR_ENABLE:
			uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
			/* One block = 128x128 pixels and one 8x8 tile takes 4 bits,
			 * so (128*128) / (8*8) / 2 = 128 bytes per block. */
			uint32_t bytes = (block_max + 1) * 128;
			if (bytes + track->cb_color_tile_offset[i] >
			    radeon_bo_size(track->cb_color_tile_bo[i])) {
				dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
					 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, block_max, bytes,
					 track->cb_color_tile_offset[i],
					 radeon_bo_size(track->cb_color_tile_bo[i]));
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
	case V_028010_DEPTH_X24_8_32_FLOAT:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
				 track->db_depth_size, bpe, track->db_offset,
				 radeon_bo_size(track->db_bo));
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	size = radeon_bo_size(track->db_bo);
	/* pitch in pixels */
	pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
	slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	base_offset = track->db_bo_mc + track->db_offset;
	array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = track->nsamples;
	array_check.blocksize = bpe;
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
			 G_028010_ARRAY_MODE(track->db_depth_info),
			 track->db_depth_info);
	switch (array_mode) {
	case V_028010_ARRAY_1D_TILED_THIN1:
		/* don't break userspace */
	case V_028010_ARRAY_2D_TILED_THIN1:
		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
			 G_028010_ARRAY_MODE(track->db_depth_info),
			 track->db_depth_info);
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
			 base_offset, base_align, array_mode);
	ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
	nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
	tmp = ntiles * bpe * 64 * nviews * track->nsamples;
	if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
		dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
			 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
			 radeon_bo_size(track->db_bo));
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
			/* htile width and height (each 8 or 4) form a 2-bit number */
			tmp = track->htile_surface & 3;
			/* alignment is htile alignment * 8; htile alignment varies
			 * with the number of pipes and the htile width and nby */
			switch (track->npipes) {
			case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
			case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
			case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
			case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
			case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
			case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
			case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
			case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
			case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
			case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
			case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
			case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 16 * 8);
				nby = round_up(nby, 16 * 8);
			case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
			case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8 */
			case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 16 * 8);
				nby = round_up(nby, 16 * 8);
			case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4 */
				nbx = round_up(nbx, 16 * 8);
				nby = round_up(nby, 8 * 8);
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
		/* compute the number of htiles */
		nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
		nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
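		/* each htile entry is 4 bytes */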
		size = nbx * nby * 4;
		size += track->htile_offset;
		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
	track->db_dirty = false;
static int r600_cs_track_check(struct radeon_cs_parser *p)
	struct r600_cs_track *track = p->track;
	/* on legacy kernels we don't perform the advanced checks */
	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						     (u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  radeon_bo_size(track->vgt_strmout_bo[i]));
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
		track->streamout_dirty = false;
	if (track->sx_misc_kill_all_prims)
	/* check that we have a cb for each enabled target; we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
		track->cb_dirty = false;
	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet is unknown.
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
	header = radeon_get_ib_value(p, idx);
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
 * r600_cs_packet_next_reloc_mm() - parse next packet, which should be a reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 * Check that the next packet is a relocation packet3, do the bo validation
 * and compute the GPU offset using the provided start.
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
 * r600_cs_packet_next_reloc_nomm() - parse next packet, which should be a reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 * Check that the next packet is a relocation packet3, do the bo validation
 * and compute the GPU offset using the provided start.
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
	*cs_reloc = p->relocs;
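	/* legacy (non-mm) path: the relocation chunk holds raw dwords, with the
	 * low 32 bits of the GPU offset at kdata[idx + 0] and the high 32 bits
	 * at kdata[idx + 3].
	 */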
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 * Check whether the next packet is a relocation packet3 (NOP), without
 * advancing the parser.
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
	struct radeon_cs_packet p3reloc;
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;
	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;
	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;
	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			DRM_ERROR("unknown crtc reloc\n");
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 * This function will test against r600_reg_safe_bm and return 0 if the
 * register is safe. If the register is not flagged as safe, this function
 * will test it against a list of registers needing special handling.
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
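	/* each 32-bit word of r600_reg_safe_bm covers 32 dword registers
	 * (128 bytes of register space); (reg >> 2) & 31 below picks the
	 * bit within the word.
	 */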
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
	/* force the following regs to 0 in an attempt to disable the out buffer;
	 * we will need to better understand how it works to perform a proper
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		track->sq_config = radeon_get_ib_value(p, idx);
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			track->db_depth_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->streamout_dirty = true;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
	/* These registers were added late; there is userspace which does
	 * provide relocations for them but sets a 0 offset. In order to
	 * avoid breaking old userspace we detect this and set the address
	 * to point to the last CB_COLOR0_BASE. Note that if userspace
	 * doesn't set CB_COLOR0_BASE before this register we will report
	 * an error. Old userspace always set CB_COLOR0_BASE before any
	 * of this.
	 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
			r = r600_cs_packet_next_reloc(p, &reloc);
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->cb_dirty = true;
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		track->db_dirty = true;
	case DB_HTILE_DATA_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case SX_MEMORY_EXPORT_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
			dev_warn(p->dev, "bad SET_CONFIG_REG "
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
unsigned r600_mip_minify(unsigned size, unsigned level)
	val = max(1U, size >> level);
		val = roundup_pow_of_two(val);
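	/* e.g. a 100-texel dimension at level 2 becomes max(1, 100 >> 2) = 25,
	 * which is rounded up to 32 when the power-of-two path above is taken.
	 */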
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned nlevels = llevel - blevel + 1;
	blocksize = r600_fmt_get_blocksize(format);
	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);
		nbx = round_up(nbx, block_align);
		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);
		depth = r600_mip_minify(d0, i);
		size = nbx * nby * blocksize * nsamples;
		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);
	*mipmap_size = offset;
		*mipmap_size = *l0_size;
		*mipmap_size -= *l0_size;
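	/* the mipmap BO only needs to hold the levels past the base one, which
	 * is why level 0's size is subtracted from the running total above.
	 */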
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
				       struct radeon_bo *texture,
				       struct radeon_bo *mipmap,
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	struct array_mode_checker array_check;
	/* on legacy kernels we don't perform the advanced check */
	if (p->rdev == NULL)
	/* convert to bytes */
	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
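	/* words 0-5 are the texture resource descriptor; the base addresses in
	 * words 2 and 3 are stored in units of 256 bytes, hence the << 8 above.
	 */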
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
	/* XXX check height as well... */
	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
	barray = G_038014_BASE_ARRAY(word5);
	larray = G_038014_LAST_ARRAY(word5);
	nfaces = larray - barray + 1;
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* the value read from the IB gives us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
	/* the value read from the IB gives us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	unsigned start_reg, end_reg, reg;
	track = (struct r600_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);
	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;
		/* for the clear predicate operation */
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad SET PREDICATION\n");
		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);
		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad DRAW_INDEX\n");
		offset = reloc->lobj.gpu_offset +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		r = r600_cs_track_check(p);
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
		r = r600_cs_track_check(p);
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
		r = r600_cs_track_check(p);
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				DRM_ERROR("bad WAIT_REG_MEM\n");
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
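			/* the low two bits of the poll address dword are control
			 * bits, so keep them and splice the relocated, 16-byte
			 * aligned address in above them.
			 */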
			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				DRM_ERROR("bad SURFACE_SYNC\n");
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			r = r600_cs_packet_next_reloc(p, &reloc);
				DRM_ERROR("bad EVENT_WRITE\n");
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad EVENT_WRITE\n");
		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
						base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
						mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
						reloc->lobj.tiling_flags);
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
			case SQ_TEX_VTX_VALID_BUFFER:
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				offset64 = reloc->lobj.gpu_offset + offset;
2113 ib[idx+1+(i*8)+0] = offset64;
2114 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2115 (upper_32_bits(offset64) & 0xff);
2118 case SQ_TEX_VTX_INVALID_TEXTURE:
2119 case SQ_TEX_VTX_INVALID_BUFFER:
2121 DRM_ERROR("bad SET_RESOURCE\n");
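	/* ALU constants are only range-checked when SQ_CONFIG has
	 * DX9_CONSTS set; with DX10-style constant buffers the packet
	 * has nothing to validate here.
	 */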
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
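	/* sampler states are 3 dwords apiece, hence the count % 3 check */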
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
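	/* STRMOUT_BASE_UPDATE (7xx only) re-points a streamout buffer;
	 * the BO and its offset must match the vgt_strmout_* state the
	 * tracker recorded when the buffer was originally programmed.
	 */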
	case PACKET3_STRMOUT_BASE_UPDATE:
		if (p->family < CHIP_RV770) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}
			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
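	/* SURFACE_BASE_UPDATE is only valid on RV6xx parts: it is rejected
	 * on R600 itself and on RV770 and newer, and takes no payload.
	 */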
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
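	/* STRMOUT_BUFFER_UPDATE: bit 0 of dword 0 requests a store of the
	 * buffer-filled size to DST_ADDRESS, and a source select of 2 in
	 * bits 2:1 reads the filled size back from SRC_ADDRESS; each case
	 * needs its own relocation and bounds check.
	 */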
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
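	/* COPY_DW moves one dword between any mix of register and memory:
	 * bit 0 of dword 0 marks the source as memory, bit 1 the
	 * destination; register operands must pass r600_is_safe_reg().
	 */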
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
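/* Entry point for the KMS checker: walk the IB packet by packet,
 * dispatching type-0 and type-3 packets to the checkers above, and free
 * the tracker on every exit path.
 */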
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	/* debug dump of the parsed IB */
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
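/* Legacy (UMS) path: there is no reloc chunk to resolve against a memory
 * manager, so a single zeroed reloc entry is allocated to satisfy the
 * common parser code.
 */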
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate buffer; otherwise just free memory
 * used by parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
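/* UMS entry point: wrap the user-supplied command buffer in a temporary
 * parser, run the same checker the KMS path uses, and return the
 * validated IB (and its length) to the caller.
 */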
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
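/* UMS setup: switch the reloc handler to the no-memory-manager variant */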
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}