2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
30 #include "evergreend.h"
31 #include "evergreen_reg_safe.h"
32 #include "cayman_reg_safe.h"
34 #define MAX(a,b) (((a)>(b))?(a):(b))
35 #define MIN(a,b) (((a)<(b))?(a):(b))
37 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc);
40 struct evergreen_cs_track {
47 u32 cb_color_base_last[12];
48 struct radeon_bo *cb_color_bo[12];
49 u32 cb_color_bo_offset[12];
50 struct radeon_bo *cb_color_fmask_bo[8];
51 struct radeon_bo *cb_color_cmask_bo[8];
52 u32 cb_color_info[12];
53 u32 cb_color_view[12];
54 u32 cb_color_pitch_idx[12];
55 u32 cb_color_slice_idx[12];
56 u32 cb_color_dim_idx[12];
58 u32 cb_color_pitch[12];
59 u32 cb_color_slice[12];
60 u32 cb_color_attrib[12];
61 u32 cb_color_cmask_slice[8];
62 u32 cb_color_fmask_slice[8];
65 u32 vgt_strmout_config;
66 u32 vgt_strmout_buffer_config;
67 struct radeon_bo *vgt_strmout_bo[4];
68 u64 vgt_strmout_bo_mc[4];
69 u32 vgt_strmout_bo_offset[4];
70 u32 vgt_strmout_size[4];
75 u32 db_depth_size_idx;
79 u32 db_z_write_offset;
80 struct radeon_bo *db_z_read_bo;
81 struct radeon_bo *db_z_write_bo;
85 u32 db_s_write_offset;
86 struct radeon_bo *db_s_read_bo;
87 struct radeon_bo *db_s_write_bo;
88 bool sx_misc_kill_all_prims;
91 static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
93 if (tiling_flags & RADEON_TILING_MACRO)
94 return ARRAY_2D_TILED_THIN1;
95 else if (tiling_flags & RADEON_TILING_MICRO)
96 return ARRAY_1D_TILED_THIN1;
98 return ARRAY_LINEAR_GENERAL;
101 static u32 evergreen_cs_get_num_banks(u32 nbanks)
105 return ADDR_SURF_2_BANK;
107 return ADDR_SURF_4_BANK;
110 return ADDR_SURF_8_BANK;
112 return ADDR_SURF_16_BANK;
116 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
120 for (i = 0; i < 8; i++) {
121 track->cb_color_fmask_bo[i] = NULL;
122 track->cb_color_cmask_bo[i] = NULL;
123 track->cb_color_cmask_slice[i] = 0;
124 track->cb_color_fmask_slice[i] = 0;
127 for (i = 0; i < 12; i++) {
128 track->cb_color_base_last[i] = 0;
129 track->cb_color_bo[i] = NULL;
130 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
131 track->cb_color_info[i] = 0;
132 track->cb_color_view[i] = 0xFFFFFFFF;
133 track->cb_color_pitch_idx[i] = 0;
134 track->cb_color_slice_idx[i] = 0;
track->cb_color_dim_idx[i] = 0;
136 track->cb_color_pitch[i] = 0;
137 track->cb_color_slice[i] = 0;
138 track->cb_color_dim[i] = 0;
140 track->cb_target_mask = 0xFFFFFFFF;
141 track->cb_shader_mask = 0xFFFFFFFF;
143 track->db_depth_view = 0xFFFFC000;
144 track->db_depth_size = 0xFFFFFFFF;
145 track->db_depth_size_idx = 0;
146 track->db_depth_control = 0xFFFFFFFF;
147 track->db_z_info = 0xFFFFFFFF;
148 track->db_z_idx = 0xFFFFFFFF;
149 track->db_z_read_offset = 0xFFFFFFFF;
150 track->db_z_write_offset = 0xFFFFFFFF;
151 track->db_z_read_bo = NULL;
152 track->db_z_write_bo = NULL;
153 track->db_s_info = 0xFFFFFFFF;
154 track->db_s_idx = 0xFFFFFFFF;
155 track->db_s_read_offset = 0xFFFFFFFF;
156 track->db_s_write_offset = 0xFFFFFFFF;
157 track->db_s_read_bo = NULL;
158 track->db_s_write_bo = NULL;
160 for (i = 0; i < 4; i++) {
161 track->vgt_strmout_size[i] = 0;
162 track->vgt_strmout_bo[i] = NULL;
163 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
164 track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
166 track->sx_misc_kill_all_prims = false;
170 /* value gathered from cs */
186 unsigned long base_align;
189 static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
190 struct eg_surface *surf,
193 surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
194 surf->base_align = surf->bpe;
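/*
 * For ARRAY_LINEAR_ALIGNED surfaces the pitch must be a multiple of
 * MAX(64, group_size / bpe) blocks and the base address must be aligned
 * to the memory group size.  As a rough worked example (assuming a
 * 256-byte group size and 4 bytes per element), the pitch would have to
 * be a multiple of MAX(64, 256 / 4) = 64 blocks.
 */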
200 static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
201 struct eg_surface *surf,
204 struct evergreen_cs_track *track = p->track;
207 palign = MAX(64, track->group_size / surf->bpe);
208 surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
209 surf->base_align = track->group_size;
210 surf->palign = palign;
212 if (surf->nbx & (palign - 1)) {
214 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
215 __func__, __LINE__, prefix, surf->nbx, palign);
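/*
 * 1D (micro) tiled surfaces are built from 8x8 element tiles.  The pitch
 * must cover at least one memory group worth of tiles, hence
 * palign = MAX(8, group_size / (8 * bpe * nsamples)), and the height must
 * be a multiple of 8.  E.g. with a 512-byte group size, 4 bytes per
 * element and 1 sample this gives a pitch alignment of 512 / 32 = 16 blocks.
 */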
222 static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
223 struct eg_surface *surf,
226 struct evergreen_cs_track *track = p->track;
229 palign = track->group_size / (8 * surf->bpe * surf->nsamples);
230 palign = MAX(8, palign);
231 surf->layer_size = surf->nbx * surf->nby * surf->bpe;
232 surf->base_align = track->group_size;
233 surf->palign = palign;
235 if ((surf->nbx & (palign - 1))) {
237 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
238 __func__, __LINE__, prefix, surf->nbx, palign,
239 track->group_size, surf->bpe, surf->nsamples);
243 if ((surf->nby & (8 - 1))) {
245 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
246 __func__, __LINE__, prefix, surf->nby);
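/*
 * 2D (macro) tiled surfaces group 8x8 micro tiles into macro tiles whose
 * dimensions depend on the pipe/bank configuration.  Per the computation
 * below, the macro tile is (8 * bankw * npipes * mtilea) elements wide and
 * (8 * bankh * nbanks / mtilea) elements high.  A rough example (assuming
 * bpe = 4, 1 sample, bankw = bankh = mtilea = 1, 8 pipes, 8 banks and a
 * tile split of at least 256 bytes): tileb = 64 * 4 = 256 bytes, so
 * palign = halign = 64 and base_align = 8 * 8 * 256 = 16384 bytes.
 */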
253 static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
254 struct eg_surface *surf,
257 struct evergreen_cs_track *track = p->track;
258 unsigned palign, halign, tileb, slice_pt;
260 tileb = 64 * surf->bpe * surf->nsamples;
261 palign = track->group_size / (8 * surf->bpe * surf->nsamples);
262 palign = MAX(8, palign);
264 if (tileb > surf->tsplit) {
265 slice_pt = tileb / surf->tsplit;
267 tileb = tileb / slice_pt;
268 /* macro tile width & height */
269 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
270 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
271 surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
272 surf->base_align = (palign / 8) * (halign / 8) * tileb;
273 surf->palign = palign;
274 surf->halign = halign;
276 if ((surf->nbx & (palign - 1))) {
278 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
279 __func__, __LINE__, prefix, surf->nbx, palign);
283 if ((surf->nby & (halign - 1))) {
285 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
286 __func__, __LINE__, prefix, surf->nby, halign);
294 static int evergreen_surface_check(struct radeon_cs_parser *p,
295 struct eg_surface *surf,
/* some common values are computed here */
299 surf->bpe = r600_fmt_get_blocksize(surf->format);
301 switch (surf->mode) {
302 case ARRAY_LINEAR_GENERAL:
303 return evergreen_surface_check_linear(p, surf, prefix);
304 case ARRAY_LINEAR_ALIGNED:
305 return evergreen_surface_check_linear_aligned(p, surf, prefix);
306 case ARRAY_1D_TILED_THIN1:
307 return evergreen_surface_check_1d(p, surf, prefix);
308 case ARRAY_2D_TILED_THIN1:
309 return evergreen_surface_check_2d(p, surf, prefix);
311 dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
312 __func__, __LINE__, prefix, surf->mode);
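/*
 * The *_ATTRIB/*_INFO register fields encode the tiling parameters as
 * small enums rather than raw values.  This helper converts them to the
 * real quantities used by the checks above: e.g. a NUM_BANKS field of 2
 * means 8 banks and a TILE_SPLIT field of 4 means a 1024-byte split.
 */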
318 static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
319 struct eg_surface *surf,
322 switch (surf->mode) {
323 case ARRAY_2D_TILED_THIN1:
325 case ARRAY_LINEAR_GENERAL:
326 case ARRAY_LINEAR_ALIGNED:
327 case ARRAY_1D_TILED_THIN1:
330 dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
331 __func__, __LINE__, prefix, surf->mode);
335 switch (surf->nbanks) {
336 case 0: surf->nbanks = 2; break;
337 case 1: surf->nbanks = 4; break;
338 case 2: surf->nbanks = 8; break;
339 case 3: surf->nbanks = 16; break;
341 dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
342 __func__, __LINE__, prefix, surf->nbanks);
345 switch (surf->bankw) {
346 case 0: surf->bankw = 1; break;
347 case 1: surf->bankw = 2; break;
348 case 2: surf->bankw = 4; break;
349 case 3: surf->bankw = 8; break;
351 dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
352 __func__, __LINE__, prefix, surf->bankw);
355 switch (surf->bankh) {
356 case 0: surf->bankh = 1; break;
357 case 1: surf->bankh = 2; break;
358 case 2: surf->bankh = 4; break;
359 case 3: surf->bankh = 8; break;
361 dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
362 __func__, __LINE__, prefix, surf->bankh);
365 switch (surf->mtilea) {
366 case 0: surf->mtilea = 1; break;
367 case 1: surf->mtilea = 2; break;
368 case 2: surf->mtilea = 4; break;
369 case 3: surf->mtilea = 8; break;
371 dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
372 __func__, __LINE__, prefix, surf->mtilea);
375 switch (surf->tsplit) {
376 case 0: surf->tsplit = 64; break;
377 case 1: surf->tsplit = 128; break;
378 case 2: surf->tsplit = 256; break;
379 case 3: surf->tsplit = 512; break;
380 case 4: surf->tsplit = 1024; break;
381 case 5: surf->tsplit = 2048; break;
382 case 6: surf->tsplit = 4096; break;
384 dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
385 __func__, __LINE__, prefix, surf->tsplit);
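/*
 * Color buffer validation.  CB_COLOR*_PITCH holds (pitch / 8) - 1 and
 * CB_COLOR*_SLICE holds (pitch * height / 64) - 1, which is why nbx and
 * nby are reconstructed as (pitch + 1) * 8 and ((slice + 1) * 64) / nbx
 * below.  E.g. a 1024x768 surface would be programmed with a pitch field
 * of 127 and a slice field of 12287.
 */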
391 static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
393 struct evergreen_cs_track *track = p->track;
394 struct eg_surface surf;
395 unsigned pitch, slice, mslice;
396 unsigned long offset;
399 mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
400 pitch = track->cb_color_pitch[id];
401 slice = track->cb_color_slice[id];
402 surf.nbx = (pitch + 1) * 8;
403 surf.nby = ((slice + 1) * 64) / surf.nbx;
404 surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
405 surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
406 surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
407 surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
408 surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
409 surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
410 surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
413 if (!r600_fmt_is_valid_color(surf.format)) {
414 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
415 __func__, __LINE__, surf.format,
416 id, track->cb_color_info[id]);
420 r = evergreen_surface_value_conv_check(p, &surf, "cb");
425 r = evergreen_surface_check(p, &surf, "cb");
427 dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
428 __func__, __LINE__, id, track->cb_color_pitch[id],
429 track->cb_color_slice[id], track->cb_color_attrib[id],
430 track->cb_color_info[id]);
434 offset = track->cb_color_bo_offset[id] << 8;
435 if (offset & (surf.base_align - 1)) {
436 dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
437 __func__, __LINE__, id, offset, surf.base_align);
441 offset += surf.layer_size * mslice;
442 if (offset > radeon_bo_size(track->cb_color_bo[id])) {
443 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
444 "offset %d, max layer %d, bo size %ld, slice %d)\n",
445 __func__, __LINE__, id, surf.layer_size,
446 track->cb_color_bo_offset[id] << 8, mslice,
447 radeon_bo_size(track->cb_color_bo[id]), slice);
448 dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
449 __func__, __LINE__, surf.nbx, surf.nby,
450 surf.mode, surf.bpe, surf.nsamples,
451 surf.bankw, surf.bankh,
452 surf.tsplit, surf.mtilea);
459 static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
461 struct evergreen_cs_track *track = p->track;
462 struct eg_surface surf;
463 unsigned pitch, slice, mslice;
464 unsigned long offset;
467 mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
468 pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
469 slice = track->db_depth_slice;
470 surf.nbx = (pitch + 1) * 8;
471 surf.nby = ((slice + 1) * 64) / surf.nbx;
472 surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
473 surf.format = G_028044_FORMAT(track->db_s_info);
474 surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
475 surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
476 surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
477 surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
478 surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
481 if (surf.format != 1) {
482 dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
483 __func__, __LINE__, surf.format);
486 /* replace by color format so we can use same code */
487 surf.format = V_028C70_COLOR_8;
489 r = evergreen_surface_value_conv_check(p, &surf, "stencil");
494 r = evergreen_surface_check(p, &surf, NULL);
/* old userspace doesn't compute proper depth/stencil alignment;
 * check the alignment against a bigger bytes-per-element value and
 * only report an error if that alignment is wrong too.
 */
500 surf.format = V_028C70_COLOR_8_8_8_8;
501 r = evergreen_surface_check(p, &surf, "stencil");
503 dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
504 __func__, __LINE__, track->db_depth_size,
505 track->db_depth_slice, track->db_s_info, track->db_z_info);
510 offset = track->db_s_read_offset << 8;
511 if (offset & (surf.base_align - 1)) {
512 dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
513 __func__, __LINE__, offset, surf.base_align);
516 offset += surf.layer_size * mslice;
517 if (offset > radeon_bo_size(track->db_s_read_bo)) {
518 dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
519 "offset %ld, max layer %d, bo size %ld)\n",
520 __func__, __LINE__, surf.layer_size,
521 (unsigned long)track->db_s_read_offset << 8, mslice,
522 radeon_bo_size(track->db_s_read_bo));
523 dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
524 __func__, __LINE__, track->db_depth_size,
525 track->db_depth_slice, track->db_s_info, track->db_z_info);
529 offset = track->db_s_write_offset << 8;
530 if (offset & (surf.base_align - 1)) {
531 dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
532 __func__, __LINE__, offset, surf.base_align);
535 offset += surf.layer_size * mslice;
536 if (offset > radeon_bo_size(track->db_s_write_bo)) {
537 dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
538 "offset %ld, max layer %d, bo size %ld)\n",
539 __func__, __LINE__, surf.layer_size,
540 (unsigned long)track->db_s_write_offset << 8, mslice,
541 radeon_bo_size(track->db_s_write_bo));
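/*
 * Depth validation mirrors the stencil path above but takes its format
 * and tile split from DB_Z_INFO.  The depth format is mapped onto a color
 * format of the same element size (16-bit Z to COLOR_16, Z_32_FLOAT to
 * COLOR_8_8_8_8) so the generic surface checker can be reused.
 */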
548 static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
550 struct evergreen_cs_track *track = p->track;
551 struct eg_surface surf;
552 unsigned pitch, slice, mslice;
553 unsigned long offset;
556 mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
557 pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
558 slice = track->db_depth_slice;
559 surf.nbx = (pitch + 1) * 8;
560 surf.nby = ((slice + 1) * 64) / surf.nbx;
561 surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
562 surf.format = G_028040_FORMAT(track->db_z_info);
563 surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
564 surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
565 surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
566 surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
567 surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
570 switch (surf.format) {
572 surf.format = V_028C70_COLOR_16;
575 case V_028040_Z_32_FLOAT:
576 surf.format = V_028C70_COLOR_8_8_8_8;
579 dev_warn(p->dev, "%s:%d depth invalid format %d\n",
580 __func__, __LINE__, surf.format);
584 r = evergreen_surface_value_conv_check(p, &surf, "depth");
586 dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
587 __func__, __LINE__, track->db_depth_size,
588 track->db_depth_slice, track->db_z_info);
592 r = evergreen_surface_check(p, &surf, "depth");
594 dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
595 __func__, __LINE__, track->db_depth_size,
596 track->db_depth_slice, track->db_z_info);
600 offset = track->db_z_read_offset << 8;
601 if (offset & (surf.base_align - 1)) {
dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
606 offset += surf.layer_size * mslice;
607 if (offset > radeon_bo_size(track->db_z_read_bo)) {
608 dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
609 "offset %ld, max layer %d, bo size %ld)\n",
610 __func__, __LINE__, surf.layer_size,
611 (unsigned long)track->db_z_read_offset << 8, mslice,
612 radeon_bo_size(track->db_z_read_bo));
616 offset = track->db_z_write_offset << 8;
617 if (offset & (surf.base_align - 1)) {
dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
__func__, __LINE__, offset, surf.base_align);
622 offset += surf.layer_size * mslice;
623 if (offset > radeon_bo_size(track->db_z_write_bo)) {
624 dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
625 "offset %ld, max layer %d, bo size %ld)\n",
626 __func__, __LINE__, surf.layer_size,
627 (unsigned long)track->db_z_write_offset << 8, mslice,
628 radeon_bo_size(track->db_z_write_bo));
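/*
 * Texture resource validation.  The eight resource dwords fetched below
 * follow the SQ texture resource layout: word 0 holds the dimension,
 * pitch and width, word 1 the height, depth and array mode, words 2 and 3
 * the base and mip addresses (in 256-byte units), word 5 the last level
 * and last array slice, word 6 the tile split and word 7 the data format,
 * bank geometry and macro tile aspect.
 */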
635 static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
636 struct radeon_bo *texture,
637 struct radeon_bo *mipmap,
640 struct eg_surface surf;
641 unsigned long toffset, moffset;
642 unsigned dim, llevel, mslice, width, height, depth, i;
646 texdw[0] = radeon_get_ib_value(p, idx + 0);
647 texdw[1] = radeon_get_ib_value(p, idx + 1);
648 texdw[2] = radeon_get_ib_value(p, idx + 2);
649 texdw[3] = radeon_get_ib_value(p, idx + 3);
650 texdw[4] = radeon_get_ib_value(p, idx + 4);
651 texdw[5] = radeon_get_ib_value(p, idx + 5);
652 texdw[6] = radeon_get_ib_value(p, idx + 6);
653 texdw[7] = radeon_get_ib_value(p, idx + 7);
654 dim = G_030000_DIM(texdw[0]);
655 llevel = G_030014_LAST_LEVEL(texdw[5]);
656 mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
657 width = G_030000_TEX_WIDTH(texdw[0]) + 1;
658 height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
659 depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
660 surf.format = G_03001C_DATA_FORMAT(texdw[7]);
661 surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
662 surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
663 surf.nby = r600_fmt_get_nblocksy(surf.format, height);
664 surf.mode = G_030004_ARRAY_MODE(texdw[1]);
665 surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
666 surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
667 surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
668 surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
669 surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
671 toffset = texdw[2] << 8;
672 moffset = texdw[3] << 8;
674 if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
675 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
676 __func__, __LINE__, surf.format);
680 case V_030000_SQ_TEX_DIM_1D:
681 case V_030000_SQ_TEX_DIM_2D:
682 case V_030000_SQ_TEX_DIM_CUBEMAP:
683 case V_030000_SQ_TEX_DIM_1D_ARRAY:
684 case V_030000_SQ_TEX_DIM_2D_ARRAY:
686 case V_030000_SQ_TEX_DIM_3D:
689 dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
690 __func__, __LINE__, dim);
694 r = evergreen_surface_value_conv_check(p, &surf, "texture");
700 evergreen_surface_check(p, &surf, NULL);
701 surf.nby = ALIGN(surf.nby, surf.halign);
703 r = evergreen_surface_check(p, &surf, "texture");
705 dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
706 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
707 texdw[5], texdw[6], texdw[7]);
711 /* check texture size */
712 if (toffset & (surf.base_align - 1)) {
713 dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
714 __func__, __LINE__, toffset, surf.base_align);
717 if (moffset & (surf.base_align - 1)) {
718 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
719 __func__, __LINE__, moffset, surf.base_align);
722 if (dim == SQ_TEX_DIM_3D) {
723 toffset += surf.layer_size * depth;
725 toffset += surf.layer_size * mslice;
727 if (toffset > radeon_bo_size(texture)) {
728 dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
729 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
730 __func__, __LINE__, surf.layer_size,
731 (unsigned long)texdw[2] << 8, mslice,
732 depth, radeon_bo_size(texture),
737 /* check mipmap size */
738 for (i = 1; i <= llevel; i++) {
741 w = r600_mip_minify(width, i);
742 h = r600_mip_minify(height, i);
743 d = r600_mip_minify(depth, i);
744 surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
745 surf.nby = r600_fmt_get_nblocksy(surf.format, h);
748 case ARRAY_2D_TILED_THIN1:
749 if (surf.nbx < surf.palign || surf.nby < surf.halign) {
750 surf.mode = ARRAY_1D_TILED_THIN1;
752 /* recompute alignment */
753 evergreen_surface_check(p, &surf, NULL);
755 case ARRAY_LINEAR_GENERAL:
756 case ARRAY_LINEAR_ALIGNED:
757 case ARRAY_1D_TILED_THIN1:
760 dev_warn(p->dev, "%s:%d invalid array mode %d\n",
761 __func__, __LINE__, surf.mode);
764 surf.nbx = ALIGN(surf.nbx, surf.palign);
765 surf.nby = ALIGN(surf.nby, surf.halign);
767 r = evergreen_surface_check(p, &surf, "mipmap");
772 if (dim == SQ_TEX_DIM_3D) {
773 moffset += surf.layer_size * d;
775 moffset += surf.layer_size * mslice;
777 if (moffset > radeon_bo_size(mipmap)) {
778 dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
779 "offset %ld, coffset %ld, max layer %d, depth %d, "
780 "bo size %ld) level0 (%d %d %d)\n",
781 __func__, __LINE__, i, surf.layer_size,
782 (unsigned long)texdw[3] << 8, moffset, mslice,
783 d, radeon_bo_size(mipmap),
784 width, height, depth);
785 dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
786 __func__, __LINE__, surf.nbx, surf.nby,
787 surf.mode, surf.bpe, surf.nsamples,
788 surf.bankw, surf.bankh,
789 surf.tsplit, surf.mtilea);
797 static int evergreen_cs_track_check(struct radeon_cs_parser *p)
799 struct evergreen_cs_track *track = p->track;
803 /* check streamout */
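/*
 * Bit i of VGT_STRMOUT_CONFIG enables stream i, and the i-th nibble of
 * VGT_STRMOUT_BUFFER_CONFIG selects which of the four streamout buffers
 * that stream writes to; each enabled buffer must fit within its bo.
 */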
804 for (i = 0; i < 4; i++) {
805 if (track->vgt_strmout_config & (1 << i)) {
806 for (j = 0; j < 4; j++) {
807 if ((track->vgt_strmout_buffer_config >> (i * 4)) & (1 << j)) {
808 if (track->vgt_strmout_bo[j]) {
809 u64 offset = (u64)track->vgt_strmout_bo_offset[j] +
810 (u64)track->vgt_strmout_size[j];
if (offset > radeon_bo_size(track->vgt_strmout_bo[j])) {
812 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
814 radeon_bo_size(track->vgt_strmout_bo[j]));
818 dev_warn(p->dev, "No buffer for streamout %d\n", j);
826 if (track->sx_misc_kill_all_prims)
829 /* check that we have a cb for each enabled target
831 tmp = track->cb_target_mask;
832 for (i = 0; i < 8; i++) {
833 if ((tmp >> (i * 4)) & 0xF) {
834 /* at least one component is enabled */
835 if (track->cb_color_bo[i] == NULL) {
836 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
837 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
841 r = evergreen_cs_track_validate_cb(p, i);
848 /* Check stencil buffer */
849 if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
850 r = evergreen_cs_track_validate_stencil(p);
854 /* Check depth buffer */
855 if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
856 r = evergreen_cs_track_validate_depth(p);
865 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context
 * @pkt: where to store packet information
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet type is unknown.
872 int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
873 struct radeon_cs_packet *pkt,
876 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
879 if (idx >= ib_chunk->length_dw) {
880 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
881 idx, ib_chunk->length_dw);
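/*
 * On this GPU family the CP packet header encodes the packet type in bits
 * [31:30] and a count-minus-one field in bits [29:16]; for type 0 packets
 * the low bits hold the starting register offset and for type 3 packets
 * bits [15:8] hold the opcode, which is what the CP_PACKET_GET_*,
 * CP_PACKET0_GET_REG and CP_PACKET3_GET_OPCODE helpers below extract.
 */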
884 header = radeon_get_ib_value(p, idx);
886 pkt->type = CP_PACKET_GET_TYPE(header);
887 pkt->count = CP_PACKET_GET_COUNT(header);
891 pkt->reg = CP_PACKET0_GET_REG(header);
894 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
900 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
903 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
904 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
905 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
912 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @p: parser structure holding parsing context
 * @cs_reloc: where to store the relocation information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
922 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
923 struct radeon_cs_reloc **cs_reloc)
925 struct radeon_cs_chunk *relocs_chunk;
926 struct radeon_cs_packet p3reloc;
930 if (p->chunk_relocs_idx == -1) {
931 DRM_ERROR("No relocation chunk !\n");
935 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
936 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
940 p->idx += p3reloc.count + 2;
941 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
942 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
946 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
947 if (idx >= relocs_chunk->length_dw) {
948 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
949 idx, relocs_chunk->length_dw);
952 /* FIXME: we assume reloc size is 4 dwords */
953 *cs_reloc = p->relocs_ptr[(idx / 4)];
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
961 * Userspace sends a special sequence for VLINE waits.
962 * PACKET0 - VLINE_START_END + value
963 * PACKET3 - WAIT_REG_MEM poll vline status reg
964 * RELOC (P3) - crtc_id in reloc.
966 * This function parses this and relocates the VLINE START END
967 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the wait in that case.
971 static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
973 struct drm_mode_object *obj;
974 struct drm_crtc *crtc;
975 struct radeon_crtc *radeon_crtc;
976 struct radeon_cs_packet p3reloc, wait_reg_mem;
979 uint32_t header, h_idx, reg, wait_reg_mem_info;
980 volatile uint32_t *ib;
984 /* parse the WAIT_REG_MEM */
985 r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
/* check it's a WAIT_REG_MEM */
990 if (wait_reg_mem.type != PACKET_TYPE3 ||
991 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
992 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
996 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
997 /* bit 4 is reg (0) or mem (1) */
998 if (wait_reg_mem_info & 0x10) {
999 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
1002 /* waiting for value to be equal */
1003 if ((wait_reg_mem_info & 0x7) != 0x3) {
1004 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
1007 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
1008 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
1012 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
1013 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
1017 /* jump over the NOP */
1018 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
1023 p->idx += wait_reg_mem.count + 2;
1024 p->idx += p3reloc.count + 2;
1026 header = radeon_get_ib_value(p, h_idx);
1027 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
1028 reg = CP_PACKET0_GET_REG(header);
1029 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1031 DRM_ERROR("cannot find crtc %d\n", crtc_id);
1034 crtc = obj_to_crtc(obj);
1035 radeon_crtc = to_radeon_crtc(crtc);
1036 crtc_id = radeon_crtc->crtc_id;
1038 if (!crtc->enabled) {
1039 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
1040 ib[h_idx + 2] = PACKET2(0);
1041 ib[h_idx + 3] = PACKET2(0);
1042 ib[h_idx + 4] = PACKET2(0);
1043 ib[h_idx + 5] = PACKET2(0);
1044 ib[h_idx + 6] = PACKET2(0);
1045 ib[h_idx + 7] = PACKET2(0);
1046 ib[h_idx + 8] = PACKET2(0);
1049 case EVERGREEN_VLINE_START_END:
1050 header &= ~R600_CP_PACKET0_REG_MASK;
1051 header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
1053 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
1056 DRM_ERROR("unknown crtc reloc\n");
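/*
 * Type 0 packets are only accepted for the VLINE wait sequence handled
 * above; any other register written through packet 0 is rejected.
 */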
1063 static int evergreen_packet0_check(struct radeon_cs_parser *p,
1064 struct radeon_cs_packet *pkt,
1065 unsigned idx, unsigned reg)
1070 case EVERGREEN_VLINE_START_END:
1071 r = evergreen_cs_packet_parse_vline(p);
1073 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1079 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1086 static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1087 struct radeon_cs_packet *pkt)
1095 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
1096 r = evergreen_packet0_check(p, pkt, idx, reg);
1105 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
1107 * @reg: register we are testing
1108 * @idx: index into the cs buffer
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe, this
 * function will test it against a list of registers needing special handling.
1114 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1116 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1117 struct radeon_cs_reloc *reloc;
1122 if (p->rdev->family >= CHIP_CAYMAN)
1123 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1125 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
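/*
 * The safe-register bitmaps carry one bit per register dword: the word
 * index is derived from the register offset and m picks the bit within
 * that 32-bit word, e.g. for reg 0x28800 the bit is (0x28800 >> 2) & 31 = 0.
 */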
1128 if (i >= last_reg) {
1129 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1132 m = 1 << ((reg >> 2) & 31);
1133 if (p->rdev->family >= CHIP_CAYMAN) {
1134 if (!(cayman_reg_safe_bm[i] & m))
1137 if (!(evergreen_reg_safe_bm[i] & m))
/* force the following regs to 0 in an attempt to disable the out buffer;
 * we would need to understand how it works better in order to perform a
 * proper security check on it (Jerome)
 */
1146 case SQ_ESGS_RING_SIZE:
1147 case SQ_GSVS_RING_SIZE:
1148 case SQ_ESTMP_RING_SIZE:
1149 case SQ_GSTMP_RING_SIZE:
1150 case SQ_HSTMP_RING_SIZE:
1151 case SQ_LSTMP_RING_SIZE:
1152 case SQ_PSTMP_RING_SIZE:
1153 case SQ_VSTMP_RING_SIZE:
1154 case SQ_ESGS_RING_ITEMSIZE:
1155 case SQ_ESTMP_RING_ITEMSIZE:
1156 case SQ_GSTMP_RING_ITEMSIZE:
1157 case SQ_GSVS_RING_ITEMSIZE:
1158 case SQ_GS_VERT_ITEMSIZE:
1159 case SQ_GS_VERT_ITEMSIZE_1:
1160 case SQ_GS_VERT_ITEMSIZE_2:
1161 case SQ_GS_VERT_ITEMSIZE_3:
1162 case SQ_GSVS_RING_OFFSET_1:
1163 case SQ_GSVS_RING_OFFSET_2:
1164 case SQ_GSVS_RING_OFFSET_3:
1165 case SQ_HSTMP_RING_ITEMSIZE:
1166 case SQ_LSTMP_RING_ITEMSIZE:
1167 case SQ_PSTMP_RING_ITEMSIZE:
1168 case SQ_VSTMP_RING_ITEMSIZE:
1169 case VGT_TF_RING_SIZE:
1170 /* get value to populate the IB don't remove */
1171 /*tmp =radeon_get_ib_value(p, idx);
1174 case SQ_ESGS_RING_BASE:
1175 case SQ_GSVS_RING_BASE:
1176 case SQ_ESTMP_RING_BASE:
1177 case SQ_GSTMP_RING_BASE:
1178 case SQ_HSTMP_RING_BASE:
1179 case SQ_LSTMP_RING_BASE:
1180 case SQ_PSTMP_RING_BASE:
1181 case SQ_VSTMP_RING_BASE:
1182 r = evergreen_cs_packet_next_reloc(p, &reloc);
1184 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1188 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1190 case DB_DEPTH_CONTROL:
1191 track->db_depth_control = radeon_get_ib_value(p, idx);
1193 case CAYMAN_DB_EQAA:
1194 if (p->rdev->family < CHIP_CAYMAN) {
1195 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1200 case CAYMAN_DB_DEPTH_INFO:
1201 if (p->rdev->family < CHIP_CAYMAN) {
1202 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1208 track->db_z_info = radeon_get_ib_value(p, idx);
1209 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1210 r = evergreen_cs_packet_next_reloc(p, &reloc);
1212 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1216 ib[idx] &= ~Z_ARRAY_MODE(0xf);
1217 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
1218 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1219 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1220 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1221 unsigned bankw, bankh, mtaspect, tile_split;
1223 evergreen_tiling_fields(reloc->lobj.tiling_flags,
1224 &bankw, &bankh, &mtaspect,
1226 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1227 ib[idx] |= DB_TILE_SPLIT(tile_split) |
1228 DB_BANK_WIDTH(bankw) |
1229 DB_BANK_HEIGHT(bankh) |
1230 DB_MACRO_TILE_ASPECT(mtaspect);
1234 case DB_STENCIL_INFO:
1235 track->db_s_info = radeon_get_ib_value(p, idx);
1238 track->db_depth_view = radeon_get_ib_value(p, idx);
1241 track->db_depth_size = radeon_get_ib_value(p, idx);
1242 track->db_depth_size_idx = idx;
1244 case R_02805C_DB_DEPTH_SLICE:
1245 track->db_depth_slice = radeon_get_ib_value(p, idx);
1247 case DB_Z_READ_BASE:
1248 r = evergreen_cs_packet_next_reloc(p, &reloc);
1250 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1254 track->db_z_read_offset = radeon_get_ib_value(p, idx);
1255 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1256 track->db_z_read_bo = reloc->robj;
1258 case DB_Z_WRITE_BASE:
1259 r = evergreen_cs_packet_next_reloc(p, &reloc);
1261 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1265 track->db_z_write_offset = radeon_get_ib_value(p, idx);
1266 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1267 track->db_z_write_bo = reloc->robj;
1269 case DB_STENCIL_READ_BASE:
1270 r = evergreen_cs_packet_next_reloc(p, &reloc);
1272 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1276 track->db_s_read_offset = radeon_get_ib_value(p, idx);
1277 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1278 track->db_s_read_bo = reloc->robj;
1280 case DB_STENCIL_WRITE_BASE:
1281 r = evergreen_cs_packet_next_reloc(p, &reloc);
1283 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1287 track->db_s_write_offset = radeon_get_ib_value(p, idx);
1288 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1289 track->db_s_write_bo = reloc->robj;
1291 case VGT_STRMOUT_CONFIG:
1292 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
1294 case VGT_STRMOUT_BUFFER_CONFIG:
1295 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
1297 case VGT_STRMOUT_BUFFER_BASE_0:
1298 case VGT_STRMOUT_BUFFER_BASE_1:
1299 case VGT_STRMOUT_BUFFER_BASE_2:
1300 case VGT_STRMOUT_BUFFER_BASE_3:
1301 r = evergreen_cs_packet_next_reloc(p, &reloc);
1303 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1307 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1308 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1309 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1310 track->vgt_strmout_bo[tmp] = reloc->robj;
1311 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
1313 case VGT_STRMOUT_BUFFER_SIZE_0:
1314 case VGT_STRMOUT_BUFFER_SIZE_1:
1315 case VGT_STRMOUT_BUFFER_SIZE_2:
1316 case VGT_STRMOUT_BUFFER_SIZE_3:
1317 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1318 /* size in register is DWs, convert to bytes */
1319 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1322 r = evergreen_cs_packet_next_reloc(p, &reloc);
1324 dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
1329 case CB_TARGET_MASK:
1330 track->cb_target_mask = radeon_get_ib_value(p, idx);
1332 case CB_SHADER_MASK:
1333 track->cb_shader_mask = radeon_get_ib_value(p, idx);
1335 case PA_SC_AA_CONFIG:
1336 if (p->rdev->family >= CHIP_CAYMAN) {
1337 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1341 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
1342 track->nsamples = 1 << tmp;
1344 case CAYMAN_PA_SC_AA_CONFIG:
1345 if (p->rdev->family < CHIP_CAYMAN) {
1346 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1350 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
1351 track->nsamples = 1 << tmp;
1353 case CB_COLOR0_VIEW:
1354 case CB_COLOR1_VIEW:
1355 case CB_COLOR2_VIEW:
1356 case CB_COLOR3_VIEW:
1357 case CB_COLOR4_VIEW:
1358 case CB_COLOR5_VIEW:
1359 case CB_COLOR6_VIEW:
1360 case CB_COLOR7_VIEW:
1361 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
1362 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1364 case CB_COLOR8_VIEW:
1365 case CB_COLOR9_VIEW:
1366 case CB_COLOR10_VIEW:
1367 case CB_COLOR11_VIEW:
1368 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
1369 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1371 case CB_COLOR0_INFO:
1372 case CB_COLOR1_INFO:
1373 case CB_COLOR2_INFO:
1374 case CB_COLOR3_INFO:
1375 case CB_COLOR4_INFO:
1376 case CB_COLOR5_INFO:
1377 case CB_COLOR6_INFO:
1378 case CB_COLOR7_INFO:
1379 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
1380 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1381 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1382 r = evergreen_cs_packet_next_reloc(p, &reloc);
1384 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1388 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1389 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1392 case CB_COLOR8_INFO:
1393 case CB_COLOR9_INFO:
1394 case CB_COLOR10_INFO:
1395 case CB_COLOR11_INFO:
1396 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
1397 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1398 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1399 r = evergreen_cs_packet_next_reloc(p, &reloc);
1401 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1405 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1406 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1409 case CB_COLOR0_PITCH:
1410 case CB_COLOR1_PITCH:
1411 case CB_COLOR2_PITCH:
1412 case CB_COLOR3_PITCH:
1413 case CB_COLOR4_PITCH:
1414 case CB_COLOR5_PITCH:
1415 case CB_COLOR6_PITCH:
1416 case CB_COLOR7_PITCH:
1417 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
1418 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1419 track->cb_color_pitch_idx[tmp] = idx;
1421 case CB_COLOR8_PITCH:
1422 case CB_COLOR9_PITCH:
1423 case CB_COLOR10_PITCH:
1424 case CB_COLOR11_PITCH:
1425 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
1426 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1427 track->cb_color_pitch_idx[tmp] = idx;
1429 case CB_COLOR0_SLICE:
1430 case CB_COLOR1_SLICE:
1431 case CB_COLOR2_SLICE:
1432 case CB_COLOR3_SLICE:
1433 case CB_COLOR4_SLICE:
1434 case CB_COLOR5_SLICE:
1435 case CB_COLOR6_SLICE:
1436 case CB_COLOR7_SLICE:
1437 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
1438 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1439 track->cb_color_slice_idx[tmp] = idx;
1441 case CB_COLOR8_SLICE:
1442 case CB_COLOR9_SLICE:
1443 case CB_COLOR10_SLICE:
1444 case CB_COLOR11_SLICE:
1445 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
1446 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1447 track->cb_color_slice_idx[tmp] = idx;
1449 case CB_COLOR0_ATTRIB:
1450 case CB_COLOR1_ATTRIB:
1451 case CB_COLOR2_ATTRIB:
1452 case CB_COLOR3_ATTRIB:
1453 case CB_COLOR4_ATTRIB:
1454 case CB_COLOR5_ATTRIB:
1455 case CB_COLOR6_ATTRIB:
1456 case CB_COLOR7_ATTRIB:
1457 r = evergreen_cs_packet_next_reloc(p, &reloc);
1459 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1463 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1464 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1465 unsigned bankw, bankh, mtaspect, tile_split;
1467 evergreen_tiling_fields(reloc->lobj.tiling_flags,
1468 &bankw, &bankh, &mtaspect,
1470 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1471 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1472 CB_BANK_WIDTH(bankw) |
1473 CB_BANK_HEIGHT(bankh) |
1474 CB_MACRO_TILE_ASPECT(mtaspect);
1477 tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
1478 track->cb_color_attrib[tmp] = ib[idx];
1480 case CB_COLOR8_ATTRIB:
1481 case CB_COLOR9_ATTRIB:
1482 case CB_COLOR10_ATTRIB:
1483 case CB_COLOR11_ATTRIB:
1484 r = evergreen_cs_packet_next_reloc(p, &reloc);
1486 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1490 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1491 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1492 unsigned bankw, bankh, mtaspect, tile_split;
1494 evergreen_tiling_fields(reloc->lobj.tiling_flags,
1495 &bankw, &bankh, &mtaspect,
1497 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1498 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1499 CB_BANK_WIDTH(bankw) |
1500 CB_BANK_HEIGHT(bankh) |
1501 CB_MACRO_TILE_ASPECT(mtaspect);
1504 tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
1505 track->cb_color_attrib[tmp] = ib[idx];
1515 tmp = (reg - CB_COLOR0_DIM) / 0x3c;
1516 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
1517 track->cb_color_dim_idx[tmp] = idx;
1521 case CB_COLOR10_DIM:
1522 case CB_COLOR11_DIM:
1523 tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
1524 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
1525 track->cb_color_dim_idx[tmp] = idx;
1527 case CB_COLOR0_FMASK:
1528 case CB_COLOR1_FMASK:
1529 case CB_COLOR2_FMASK:
1530 case CB_COLOR3_FMASK:
1531 case CB_COLOR4_FMASK:
1532 case CB_COLOR5_FMASK:
1533 case CB_COLOR6_FMASK:
1534 case CB_COLOR7_FMASK:
1535 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
1536 r = evergreen_cs_packet_next_reloc(p, &reloc);
1538 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1541 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1542 track->cb_color_fmask_bo[tmp] = reloc->robj;
1544 case CB_COLOR0_CMASK:
1545 case CB_COLOR1_CMASK:
1546 case CB_COLOR2_CMASK:
1547 case CB_COLOR3_CMASK:
1548 case CB_COLOR4_CMASK:
1549 case CB_COLOR5_CMASK:
1550 case CB_COLOR6_CMASK:
1551 case CB_COLOR7_CMASK:
1552 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
1553 r = evergreen_cs_packet_next_reloc(p, &reloc);
1555 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1558 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1559 track->cb_color_cmask_bo[tmp] = reloc->robj;
1561 case CB_COLOR0_FMASK_SLICE:
1562 case CB_COLOR1_FMASK_SLICE:
1563 case CB_COLOR2_FMASK_SLICE:
1564 case CB_COLOR3_FMASK_SLICE:
1565 case CB_COLOR4_FMASK_SLICE:
1566 case CB_COLOR5_FMASK_SLICE:
1567 case CB_COLOR6_FMASK_SLICE:
1568 case CB_COLOR7_FMASK_SLICE:
1569 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
1570 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
1572 case CB_COLOR0_CMASK_SLICE:
1573 case CB_COLOR1_CMASK_SLICE:
1574 case CB_COLOR2_CMASK_SLICE:
1575 case CB_COLOR3_CMASK_SLICE:
1576 case CB_COLOR4_CMASK_SLICE:
1577 case CB_COLOR5_CMASK_SLICE:
1578 case CB_COLOR6_CMASK_SLICE:
1579 case CB_COLOR7_CMASK_SLICE:
1580 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
1581 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
1583 case CB_COLOR0_BASE:
1584 case CB_COLOR1_BASE:
1585 case CB_COLOR2_BASE:
1586 case CB_COLOR3_BASE:
1587 case CB_COLOR4_BASE:
1588 case CB_COLOR5_BASE:
1589 case CB_COLOR6_BASE:
1590 case CB_COLOR7_BASE:
1591 r = evergreen_cs_packet_next_reloc(p, &reloc);
1593 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1597 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
1598 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1599 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1600 track->cb_color_base_last[tmp] = ib[idx];
1601 track->cb_color_bo[tmp] = reloc->robj;
1603 case CB_COLOR8_BASE:
1604 case CB_COLOR9_BASE:
1605 case CB_COLOR10_BASE:
1606 case CB_COLOR11_BASE:
1607 r = evergreen_cs_packet_next_reloc(p, &reloc);
1609 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1613 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
1614 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1615 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1616 track->cb_color_base_last[tmp] = ib[idx];
1617 track->cb_color_bo[tmp] = reloc->robj;
1619 case CB_IMMED0_BASE:
1620 case CB_IMMED1_BASE:
1621 case CB_IMMED2_BASE:
1622 case CB_IMMED3_BASE:
1623 case CB_IMMED4_BASE:
1624 case CB_IMMED5_BASE:
1625 case CB_IMMED6_BASE:
1626 case CB_IMMED7_BASE:
1627 case CB_IMMED8_BASE:
1628 case CB_IMMED9_BASE:
1629 case CB_IMMED10_BASE:
1630 case CB_IMMED11_BASE:
1631 case DB_HTILE_DATA_BASE:
1632 case SQ_PGM_START_FS:
1633 case SQ_PGM_START_ES:
1634 case SQ_PGM_START_VS:
1635 case SQ_PGM_START_GS:
1636 case SQ_PGM_START_PS:
1637 case SQ_PGM_START_HS:
1638 case SQ_PGM_START_LS:
1639 case SQ_CONST_MEM_BASE:
1640 case SQ_ALU_CONST_CACHE_GS_0:
1641 case SQ_ALU_CONST_CACHE_GS_1:
1642 case SQ_ALU_CONST_CACHE_GS_2:
1643 case SQ_ALU_CONST_CACHE_GS_3:
1644 case SQ_ALU_CONST_CACHE_GS_4:
1645 case SQ_ALU_CONST_CACHE_GS_5:
1646 case SQ_ALU_CONST_CACHE_GS_6:
1647 case SQ_ALU_CONST_CACHE_GS_7:
1648 case SQ_ALU_CONST_CACHE_GS_8:
1649 case SQ_ALU_CONST_CACHE_GS_9:
1650 case SQ_ALU_CONST_CACHE_GS_10:
1651 case SQ_ALU_CONST_CACHE_GS_11:
1652 case SQ_ALU_CONST_CACHE_GS_12:
1653 case SQ_ALU_CONST_CACHE_GS_13:
1654 case SQ_ALU_CONST_CACHE_GS_14:
1655 case SQ_ALU_CONST_CACHE_GS_15:
1656 case SQ_ALU_CONST_CACHE_PS_0:
1657 case SQ_ALU_CONST_CACHE_PS_1:
1658 case SQ_ALU_CONST_CACHE_PS_2:
1659 case SQ_ALU_CONST_CACHE_PS_3:
1660 case SQ_ALU_CONST_CACHE_PS_4:
1661 case SQ_ALU_CONST_CACHE_PS_5:
1662 case SQ_ALU_CONST_CACHE_PS_6:
1663 case SQ_ALU_CONST_CACHE_PS_7:
1664 case SQ_ALU_CONST_CACHE_PS_8:
1665 case SQ_ALU_CONST_CACHE_PS_9:
1666 case SQ_ALU_CONST_CACHE_PS_10:
1667 case SQ_ALU_CONST_CACHE_PS_11:
1668 case SQ_ALU_CONST_CACHE_PS_12:
1669 case SQ_ALU_CONST_CACHE_PS_13:
1670 case SQ_ALU_CONST_CACHE_PS_14:
1671 case SQ_ALU_CONST_CACHE_PS_15:
1672 case SQ_ALU_CONST_CACHE_VS_0:
1673 case SQ_ALU_CONST_CACHE_VS_1:
1674 case SQ_ALU_CONST_CACHE_VS_2:
1675 case SQ_ALU_CONST_CACHE_VS_3:
1676 case SQ_ALU_CONST_CACHE_VS_4:
1677 case SQ_ALU_CONST_CACHE_VS_5:
1678 case SQ_ALU_CONST_CACHE_VS_6:
1679 case SQ_ALU_CONST_CACHE_VS_7:
1680 case SQ_ALU_CONST_CACHE_VS_8:
1681 case SQ_ALU_CONST_CACHE_VS_9:
1682 case SQ_ALU_CONST_CACHE_VS_10:
1683 case SQ_ALU_CONST_CACHE_VS_11:
1684 case SQ_ALU_CONST_CACHE_VS_12:
1685 case SQ_ALU_CONST_CACHE_VS_13:
1686 case SQ_ALU_CONST_CACHE_VS_14:
1687 case SQ_ALU_CONST_CACHE_VS_15:
1688 case SQ_ALU_CONST_CACHE_HS_0:
1689 case SQ_ALU_CONST_CACHE_HS_1:
1690 case SQ_ALU_CONST_CACHE_HS_2:
1691 case SQ_ALU_CONST_CACHE_HS_3:
1692 case SQ_ALU_CONST_CACHE_HS_4:
1693 case SQ_ALU_CONST_CACHE_HS_5:
1694 case SQ_ALU_CONST_CACHE_HS_6:
1695 case SQ_ALU_CONST_CACHE_HS_7:
1696 case SQ_ALU_CONST_CACHE_HS_8:
1697 case SQ_ALU_CONST_CACHE_HS_9:
1698 case SQ_ALU_CONST_CACHE_HS_10:
1699 case SQ_ALU_CONST_CACHE_HS_11:
1700 case SQ_ALU_CONST_CACHE_HS_12:
1701 case SQ_ALU_CONST_CACHE_HS_13:
1702 case SQ_ALU_CONST_CACHE_HS_14:
1703 case SQ_ALU_CONST_CACHE_HS_15:
1704 case SQ_ALU_CONST_CACHE_LS_0:
1705 case SQ_ALU_CONST_CACHE_LS_1:
1706 case SQ_ALU_CONST_CACHE_LS_2:
1707 case SQ_ALU_CONST_CACHE_LS_3:
1708 case SQ_ALU_CONST_CACHE_LS_4:
1709 case SQ_ALU_CONST_CACHE_LS_5:
1710 case SQ_ALU_CONST_CACHE_LS_6:
1711 case SQ_ALU_CONST_CACHE_LS_7:
1712 case SQ_ALU_CONST_CACHE_LS_8:
1713 case SQ_ALU_CONST_CACHE_LS_9:
1714 case SQ_ALU_CONST_CACHE_LS_10:
1715 case SQ_ALU_CONST_CACHE_LS_11:
1716 case SQ_ALU_CONST_CACHE_LS_12:
1717 case SQ_ALU_CONST_CACHE_LS_13:
1718 case SQ_ALU_CONST_CACHE_LS_14:
1719 case SQ_ALU_CONST_CACHE_LS_15:
1720 r = evergreen_cs_packet_next_reloc(p, &reloc);
1722 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1726 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1728 case SX_MEMORY_EXPORT_BASE:
1729 if (p->rdev->family >= CHIP_CAYMAN) {
1730 dev_warn(p->dev, "bad SET_CONFIG_REG "
1734 r = evergreen_cs_packet_next_reloc(p, &reloc);
1736 dev_warn(p->dev, "bad SET_CONFIG_REG "
1740 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1742 case CAYMAN_SX_SCATTER_EXPORT_BASE:
1743 if (p->rdev->family < CHIP_CAYMAN) {
1744 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1748 r = evergreen_cs_packet_next_reloc(p, &reloc);
1750 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1754 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1757 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1760 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1766 static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1770 if (p->rdev->family >= CHIP_CAYMAN)
1771 last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
1773 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
1776 if (i >= last_reg) {
1777 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1780 m = 1 << ((reg >> 2) & 31);
1781 if (p->rdev->family >= CHIP_CAYMAN) {
1782 if (!(cayman_reg_safe_bm[i] & m))
1785 if (!(evergreen_reg_safe_bm[i] & m))
1788 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
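/*
 * Main packet 3 dispatcher: draw and dispatch packets trigger a full
 * state check via evergreen_cs_track_check(), SET_*_REG packets are
 * validated one register at a time through evergreen_cs_check_reg(),
 * and SET_RESOURCE patches and validates texture and vertex buffer
 * addresses.
 */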
1792 static int evergreen_packet3_check(struct radeon_cs_parser *p,
1793 struct radeon_cs_packet *pkt)
1795 struct radeon_cs_reloc *reloc;
1796 struct evergreen_cs_track *track;
1800 unsigned start_reg, end_reg, reg;
1804 track = (struct evergreen_cs_track *)p->track;
1807 idx_value = radeon_get_ib_value(p, idx);
1809 switch (pkt->opcode) {
1810 case PACKET3_SET_PREDICATION:
1816 if (pkt->count != 1) {
1817 DRM_ERROR("bad SET PREDICATION\n");
1821 tmp = radeon_get_ib_value(p, idx + 1);
1822 pred_op = (tmp >> 16) & 0x7;
1824 /* for the clear predicate operation */
1829 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1833 r = evergreen_cs_packet_next_reloc(p, &reloc);
1835 DRM_ERROR("bad SET PREDICATION\n");
1839 offset = reloc->lobj.gpu_offset +
1840 (idx_value & 0xfffffff0) +
1841 ((u64)(tmp & 0xff) << 32);
1843 ib[idx + 0] = offset;
1844 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1847 case PACKET3_CONTEXT_CONTROL:
1848 if (pkt->count != 1) {
1849 DRM_ERROR("bad CONTEXT_CONTROL\n");
1853 case PACKET3_INDEX_TYPE:
1854 case PACKET3_NUM_INSTANCES:
1855 case PACKET3_CLEAR_STATE:
1857 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1861 case CAYMAN_PACKET3_DEALLOC_STATE:
1862 if (p->rdev->family < CHIP_CAYMAN) {
1863 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1867 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1871 case PACKET3_INDEX_BASE:
1875 if (pkt->count != 1) {
1876 DRM_ERROR("bad INDEX_BASE\n");
1879 r = evergreen_cs_packet_next_reloc(p, &reloc);
1881 DRM_ERROR("bad INDEX_BASE\n");
1885 offset = reloc->lobj.gpu_offset +
1887 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1890 ib[idx+1] = upper_32_bits(offset) & 0xff;
1892 r = evergreen_cs_track_check(p);
1894 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1899 case PACKET3_DRAW_INDEX:
1902 if (pkt->count != 3) {
1903 DRM_ERROR("bad DRAW_INDEX\n");
1906 r = evergreen_cs_packet_next_reloc(p, &reloc);
1908 DRM_ERROR("bad DRAW_INDEX\n");
1912 offset = reloc->lobj.gpu_offset +
1914 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1917 ib[idx+1] = upper_32_bits(offset) & 0xff;
1919 r = evergreen_cs_track_check(p);
1921 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1926 case PACKET3_DRAW_INDEX_2:
1930 if (pkt->count != 4) {
1931 DRM_ERROR("bad DRAW_INDEX_2\n");
1934 r = evergreen_cs_packet_next_reloc(p, &reloc);
1936 DRM_ERROR("bad DRAW_INDEX_2\n");
1940 offset = reloc->lobj.gpu_offset +
1941 radeon_get_ib_value(p, idx+1) +
1942 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1945 ib[idx+2] = upper_32_bits(offset) & 0xff;
1947 r = evergreen_cs_track_check(p);
1949 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1954 case PACKET3_DRAW_INDEX_AUTO:
1955 if (pkt->count != 1) {
1956 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1959 r = evergreen_cs_track_check(p);
1961 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1965 case PACKET3_DRAW_INDEX_MULTI_AUTO:
1966 if (pkt->count != 2) {
1967 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1970 r = evergreen_cs_track_check(p);
1972 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1976 case PACKET3_DRAW_INDEX_IMMD:
1977 if (pkt->count < 2) {
1978 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1981 r = evergreen_cs_track_check(p);
1983 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1987 case PACKET3_DRAW_INDEX_OFFSET:
1988 if (pkt->count != 2) {
1989 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1992 r = evergreen_cs_track_check(p);
1994 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1998 case PACKET3_DRAW_INDEX_OFFSET_2:
1999 if (pkt->count != 3) {
2000 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
2003 r = evergreen_cs_track_check(p);
2005 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2009 case PACKET3_DISPATCH_DIRECT:
2010 if (pkt->count != 3) {
2011 DRM_ERROR("bad DISPATCH_DIRECT\n");
2014 r = evergreen_cs_track_check(p);
2016 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2020 case PACKET3_DISPATCH_INDIRECT:
2021 if (pkt->count != 1) {
2022 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2025 r = evergreen_cs_packet_next_reloc(p, &reloc);
2027 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2030 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
2031 r = evergreen_cs_track_check(p);
2033 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
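/*
 * The fence/wait style packets below carry a GPU address split across
 * two dwords: the low 32 bits (with the bottom bits reused as flags or
 * forced to zero for alignment) and the upper 8 bits of the address in
 * the following dword, so relocation patches both dwords.
 */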
2037 case PACKET3_WAIT_REG_MEM:
2038 if (pkt->count != 5) {
2039 DRM_ERROR("bad WAIT_REG_MEM\n");
2042 /* bit 4 is reg (0) or mem (1) */
2043 if (idx_value & 0x10) {
2046 r = evergreen_cs_packet_next_reloc(p, &reloc);
2048 DRM_ERROR("bad WAIT_REG_MEM\n");
2052 offset = reloc->lobj.gpu_offset +
2053 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2054 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2056 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
2057 ib[idx+2] = upper_32_bits(offset) & 0xff;
2060 case PACKET3_SURFACE_SYNC:
2061 if (pkt->count != 3) {
2062 DRM_ERROR("bad SURFACE_SYNC\n");
2065 /* 0xffffffff/0x0 is flush all cache flag */
2066 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
2067 radeon_get_ib_value(p, idx + 2) != 0) {
2068 r = evergreen_cs_packet_next_reloc(p, &reloc);
2070 DRM_ERROR("bad SURFACE_SYNC\n");
2073 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2076 case PACKET3_EVENT_WRITE:
2077 if (pkt->count != 2 && pkt->count != 0) {
2078 DRM_ERROR("bad EVENT_WRITE\n");
2084 r = evergreen_cs_packet_next_reloc(p, &reloc);
2086 DRM_ERROR("bad EVENT_WRITE\n");
2089 offset = reloc->lobj.gpu_offset +
2090 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
2091 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2093 ib[idx+1] = offset & 0xfffffff8;
2094 ib[idx+2] = upper_32_bits(offset) & 0xff;
2097 case PACKET3_EVENT_WRITE_EOP:
2101 if (pkt->count != 4) {
2102 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2105 r = evergreen_cs_packet_next_reloc(p, &reloc);
2107 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2111 offset = reloc->lobj.gpu_offset +
2112 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2113 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2115 ib[idx+1] = offset & 0xfffffffc;
2116 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2119 case PACKET3_EVENT_WRITE_EOS:
2123 if (pkt->count != 3) {
2124 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2127 r = evergreen_cs_packet_next_reloc(p, &reloc);
2129 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2133 offset = reloc->lobj.gpu_offset +
2134 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2135 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2137 ib[idx+1] = offset & 0xfffffffc;
2138 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
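/*
 * For the SET_*_REG packets the first dword after the header is the
 * register offset in dwords from the block's start, so
 * start_reg = (offset << 2) + PACKET3_SET_*_START and the packet then
 * writes pkt->count consecutive registers, each of which is checked.
 */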
2141 case PACKET3_SET_CONFIG_REG:
2142 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
2143 end_reg = 4 * pkt->count + start_reg - 4;
2144 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2145 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2146 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2147 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2150 for (i = 0; i < pkt->count; i++) {
2151 reg = start_reg + (4 * i);
2152 r = evergreen_cs_check_reg(p, reg, idx+1+i);
2157 case PACKET3_SET_CONTEXT_REG:
2158 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
2159 end_reg = 4 * pkt->count + start_reg - 4;
2160 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
2161 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2162 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2163 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2166 for (i = 0; i < pkt->count; i++) {
2167 reg = start_reg + (4 * i);
2168 r = evergreen_cs_check_reg(p, reg, idx+1+i);
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;

						evergreen_tiling_fields(reloc->lobj.tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
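	/* The BOOL/LOOP/CTL const uploads carry no addresses, so a bounds
	 * check on the destination constant window is all that is needed.
	 */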
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
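	/* STRMOUT_BUFFER_UPDATE can both store the current buffer offset to
	 * memory (DST_ADDRESS) and preload it from memory (SRC_ADDRESS); each
	 * direction gets its own reloc and bounds check.
	 */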
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
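	/* COPY_DW copies a single dword; source and destination are each
	 * either a register (checked against the safe-reg tables) or a memory
	 * location (reloc'd and bounds-checked).
	 */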
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

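/*
 * evergreen_cs_parse() is the userspace CS entry point: it walks the IB one
 * PM4 packet at a time, lets the type-0 and type-3 checkers validate and
 * relocate each packet, and frees the state tracker on every exit path.
 */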
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;
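		/* tile_config packs the pipe, bank, group-size and row-size
		 * fields; the decode below mirrors the values programmed by
		 * the evergreen/cayman gfx setup code.
		 */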
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}

		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}

		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}

		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}

		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

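/*
 * vm parser: on VM-capable parts (cayman and newer) user IBs run behind the
 * GPU's page tables, so buffer isolation is already guaranteed.  Only register
 * writes and a small opcode whitelist need checking, and nothing is relocated.
 */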
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		return false;
	}
}

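/* Reject any type-3 packet that is not known to be safe under VM. */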
static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

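/*
 * evergreen_ib_parse() validates a VM IB in place.  For reference, a PM4
 * type-3 header is laid out roughly as
 *
 *   [31:30] type (3)  [29:16] count (body dwords minus one)  [15:8] opcode
 *
 * so a packet occupies "count + 2" dwords including its header.
 */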
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE2:
			idx += 1;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);