/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#define INVALID_OP    (~0U)

#define OP_LEN_MI           9
#define OP_LEN_2D           10
#define OP_LEN_3D_MEDIA     16
#define OP_LEN_MFX_VC       16
#define OP_LEN_VEBOX        16

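/*
 * Bits 31:29 of a command's first DWord select its command type; the
 * per-ring decode tables below map each type to a decoder description.
 */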
#define CMD_TYPE(cmd)   (((cmd) >> 29) & 7)

struct sub_op_bits {
        int hi;
        int low;
};

struct decode_info {
        char *name;
        int op_len;
        int nr_sub_op;
        struct sub_op_bits *sub_op;
};
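/*
 * op_len is the width of a command type's canonical opcode, counted
 * from bit 31: e.g. MI commands use a 9-bit opcode, so get_opcode()
 * below yields cmd >> (32 - 9), i.e. the type bits plus the MI opcode
 * field in bits 28:23.
 */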

#define   MAX_CMD_BUDGET                        0x7fffffff
#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)

#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_MATH                          0x1A
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_SEMAPHORE_SIGNAL              0x1B  /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT                0x1C  /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
#define OP_MI_2E                            0x2E  /* BDW+ */
#define OP_MI_2F                            0x2F  /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT       (1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)   (((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)

/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2<<7) | (x))

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
        ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))

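/*
 * The packed value is the 16-bit canonical opcode (OP_LEN_3D_MEDIA bits),
 * e.g. OP_PIPE_CONTROL == (3 << 13) | (3 << 11) | (2 << 8) | 0x00 == 0x7a00.
 */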
#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4                       OP_3D_MEDIA(0x0, 0x1, 0x04)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING                OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS                      OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY                  OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY                 OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND                     OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL             OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA                     OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER                       OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ                     OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP                     OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING            OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW              OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VBE command definitions below are taken from the vaapi
 * intel-driver project (BSD license):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
        (3 << 13 | \
         (pipeline) << 11 | \
         (op) << 8 | \
         (sub_opa) << 5 | \
         (sub_opb))
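/*
 * The fields mirror the sub-op split used by sub_op_mfx_vc[] below:
 * pipeline in bits 28:27 of the command, opcode in 26:24, sub-opcode A
 * in 23:21 and sub-opcode B in 20:16, giving a 16-bit canonical opcode.
 */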

#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE                       OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
        (3 << 13 | \
         (pipeline) << 11 | \
         (op) << 8 | \
         (sub_opa) << 5 | \
         (sub_opb))

#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS   7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)                  (1 << (x1))
#define ADDR_FIX_2(x1, x2)              (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)          (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)      (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
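/*
 * Example: ADDR_FIX_2(1, 2) sets bits 1 and 2 in a cmd_info addr_bitmap,
 * marking DWords 1 and 2 of the command as graphics addresses.
 */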

struct cmd_info {
        char *name;
        u32 opcode;

#define F_LEN_MASK      (1U<<0)
#define F_LEN_CONST  1U
#define F_LEN_VAR    0U

/*
 * command has its own ip advance logic,
 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<1)

#define F_POST_HANDLE   (1<<2)
        u32 flag;

#define R_RCS   (1 << RCS)
#define R_VCS1  (1 << VCS)
#define R_VCS2  (1 << VCS2)
#define R_VCS   (R_VCS1 | R_VCS2)
#define R_BCS   (1 << BCS)
#define R_VECS  (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
        /* rings that support this cmd: BLT/RCS/VCS/VECS */
        uint16_t rings;

        /* devices that support this cmd: SNB/IVB/HSW/... */
        uint16_t devices;

        /* which DWords are addresses and need to be fixed up.
         * A 0 bit means the DWord is a 32-bit non-address operand;
         * a 1 bit means it is an address operand, which may be 32-bit
         * or 64-bit depending on the architecture (see
         * "gmadr_bytes_in_cmd" in the intel_gvt device info).
         * Regardless of the address length, each address takes only
         * one bit in the bitmap.
         */
        uint16_t addr_bitmap;

        /* flag == F_LEN_CONST : command length in DWords
         * flag == F_LEN_VAR : number of low bits of DWord 0 that hold
         * the variable DWord length (decoded with a bias of 2, see
         * get_cmd_length())
         */
        uint8_t len;

        parser_cmd_handler handler;
};
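/*
 * cmd_info entries are registered in a hash table (gvt->cmd_table)
 * keyed by canonical opcode; lookup is done per ring in
 * find_cmd_entry() below.
 */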

struct cmd_entry {
        struct hlist_node hlist;
        struct cmd_info *info;
};

enum {
        RING_BUFFER_INSTRUCTION,
        BATCH_BUFFER_INSTRUCTION,
        BATCH_BUFFER_2ND_LEVEL,
};

enum {
        GTT_BUFFER,
        PPGTT_BUFFER
};

struct parser_exec_state {
        struct intel_vgpu *vgpu;
        int ring_id;

        int buf_type;

        /* batch buffer address type */
        int buf_addr_type;

        /* graphics memory address of ring buffer start */
        unsigned long ring_start;
        unsigned long ring_size;
        unsigned long ring_head;
        unsigned long ring_tail;

        /* instruction graphics memory address */
        unsigned long ip_gma;

        /* mapped va of ip_gma */
        void *ip_va;
        void *rb_va;

        void *ret_bb_va;
        /* next instruction when returning from a batch buffer to the ring buffer */
        unsigned long ret_ip_gma_ring;

        /* next instruction when returning from a 2nd-level batch buffer to a batch buffer */
        unsigned long ret_ip_gma_bb;

        /* batch buffer address type (GTT or PPGTT),
         * used when returning from a 2nd-level batch buffer
         */
        int saved_buf_addr_type;

        struct cmd_info *info;

        struct intel_vgpu_workload *workload;
};
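/*
 * The parser tracks the current instruction as an (ip_gma, ip_va) pair:
 * ip_gma is the guest graphics memory address, ip_va the mapped virtual
 * address the command DWords are actually read from (see update_ip_va()).
 */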

#define gmadr_dw_number(s)      \
        ((s)->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
        {31, 29},
        {28, 23},
};

static struct decode_info decode_info_mi = {
        "MI",
        OP_LEN_MI,
        ARRAY_SIZE(sub_op_mi),
        sub_op_mi,
};

/* ring BCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
        {31, 29},
        {28, 22},
};

static struct decode_info decode_info_2d = {
        "2D",
        OP_LEN_2D,
        ARRAY_SIZE(sub_op_2d),
        sub_op_2d,
};

/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
        {31, 29},
        {28, 27},
        {26, 24},
        {23, 16},
};

static struct decode_info decode_info_3d_media = {
        "3D_Media",
        OP_LEN_3D_MEDIA,
        ARRAY_SIZE(sub_op_3d_media),
        sub_op_3d_media,
};

/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
        {31, 29},
        {28, 27},
        {26, 24},
        {23, 21},
        {20, 16},
};

static struct decode_info decode_info_mfx_vc = {
        "MFX_VC",
        OP_LEN_MFX_VC,
        ARRAY_SIZE(sub_op_mfx_vc),
        sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
        {31, 29},
        {28, 27},
        {26, 24},
        {23, 21},
        {20, 16},
};

static struct decode_info decode_info_vebox = {
        "VEBOX",
        OP_LEN_VEBOX,
        ARRAY_SIZE(sub_op_vebox),
        sub_op_vebox,
};

static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
        [RCS] = {
                &decode_info_mi,
                NULL,
                NULL,
                &decode_info_3d_media,
                NULL,
                NULL,
                NULL,
                NULL,
        },

        [VCS] = {
                &decode_info_mi,
                NULL,
                NULL,
                &decode_info_mfx_vc,
                NULL,
                NULL,
                NULL,
                NULL,
        },

        [BCS] = {
                &decode_info_mi,
                NULL,
                &decode_info_2d,
                NULL,
                NULL,
                NULL,
                NULL,
                NULL,
        },

        [VECS] = {
                &decode_info_mi,
                NULL,
                NULL,
                &decode_info_vebox,
                NULL,
                NULL,
                NULL,
                NULL,
        },

        [VCS2] = {
                &decode_info_mi,
                NULL,
                NULL,
                &decode_info_mfx_vc,
                NULL,
                NULL,
                NULL,
                NULL,
        },
};

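/*
 * Decode the canonical opcode of a command: pick the decoder for this
 * ring and command type, then keep the top op_len bits of DWord 0.
 */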
static inline u32 get_opcode(u32 cmd, int ring_id)
{
        struct decode_info *d_info;

        if (ring_id >= I915_NUM_ENGINES)
                return INVALID_OP;

        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
        if (d_info == NULL)
                return INVALID_OP;

        return cmd >> (32 - d_info->op_len);
}

static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
                unsigned int opcode, int ring_id)
{
        struct cmd_entry *e;

        hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
                if ((opcode == e->info->opcode) &&
                                (e->info->rings & (1 << ring_id)))
                        return e->info;
        }
        return NULL;
}

static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
                u32 cmd, int ring_id)
{
        u32 opcode;

        opcode = get_opcode(cmd, ring_id);
        if (opcode == INVALID_OP)
                return NULL;

        return find_cmd_entry(gvt, opcode, ring_id);
}

static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
        return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}

static inline void print_opcode(u32 cmd, int ring_id)
{
        struct decode_info *d_info;
        int i;

        if (ring_id >= I915_NUM_ENGINES)
                return;

        d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
        if (d_info == NULL)
                return;

        gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
                        cmd >> (32 - d_info->op_len), d_info->name);

        for (i = 0; i < d_info->nr_sub_op; i++)
                pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
                                        d_info->sub_op[i].low));

        pr_err("\n");
}

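/* Access the index-th DWord of the current instruction through ip_va. */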
static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
        return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
        return *cmd_ptr(s, index);
}

static void parser_exec_state_dump(struct parser_exec_state *s)
{
        int cnt = 0;
        int i;

        gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
                        " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
                        s->ring_id, s->ring_start, s->ring_start + s->ring_size,
                        s->ring_head, s->ring_tail);

        gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
                        s->buf_type == RING_BUFFER_INSTRUCTION ?
                        "RING_BUFFER" : "BATCH_BUFFER",
                        s->buf_addr_type == GTT_BUFFER ?
                        "GTT" : "PPGTT", s->ip_gma);

        if (s->ip_va == NULL) {
                gvt_dbg_cmd(" ip_va(NULL)");
                return;
        }

        gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
                        s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
                        cmd_val(s, 2), cmd_val(s, 3));

        print_opcode(cmd_val(s, 0), s->ring_id);

        /* print the whole page to trace */
        pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
                        s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
                        cmd_val(s, 2), cmd_val(s, 3));

        s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

        while (cnt < 1024) {
                pr_err("ip_va=%p: ", s->ip_va);
                for (i = 0; i < 8; i++)
                        pr_err("%08x ", cmd_val(s, i));
                pr_err("\n");

                s->ip_va += 8 * sizeof(u32);
                cnt += 8;
        }
}

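/*
 * Recompute ip_va from ip_gma. For ring-buffer execution the offset
 * from ring_head must account for wrap-around at ring_start + ring_size;
 * for batch buffers, ip_va is reset to the shadow batch buffer mapping
 * (ret_bb_va).
 */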
static inline void update_ip_va(struct parser_exec_state *s)
{
        unsigned long len = 0;

        if (WARN_ON(s->ring_head == s->ring_tail))
                return;

        if (s->buf_type == RING_BUFFER_INSTRUCTION) {
                unsigned long ring_top = s->ring_start + s->ring_size;

                if (s->ring_head > s->ring_tail) {
                        if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
                                len = (s->ip_gma - s->ring_head);
                        else if (s->ip_gma >= s->ring_start &&
                                        s->ip_gma <= s->ring_tail)
                                len = (ring_top - s->ring_head) +
                                        (s->ip_gma - s->ring_start);
                } else
                        len = (s->ip_gma - s->ring_head);

                s->ip_va = s->rb_va + len;
        } else { /* shadow batch buffer */
                s->ip_va = s->ret_bb_va;
        }
}

static inline int ip_gma_set(struct parser_exec_state *s,
                unsigned long ip_gma)
{
        WARN_ON(!IS_ALIGNED(ip_gma, 4));

        s->ip_gma = ip_gma;
        update_ip_va(s);
        return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
                unsigned int dw_len)
{
        s->ip_gma += (dw_len << 2);

        if (s->buf_type == RING_BUFFER_INSTRUCTION) {
                if (s->ip_gma >= s->ring_start + s->ring_size)
                        s->ip_gma -= s->ring_size;
                update_ip_va(s);
        } else {
                s->ip_va += (dw_len << 2);
        }

        return 0;
}

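/*
 * Command length in DWords: constant for F_LEN_CONST commands; otherwise
 * decoded from the low info->len bits of DWord 0 with a bias of 2, so
 * e.g. a DWord-length field of 2 means the command occupies 4 DWords.
 */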
static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
{
        if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
                return info->len;
        else
                return (cmd & ((1U << info->len) - 1)) + 2;
}

static inline int cmd_length(struct parser_exec_state *s)
{
        return get_cmd_length(s->info, cmd_val(s, 0));
}


/* do not remove this, some platforms may need clflush here */
#define patch_value(s, addr, val) do { \
        *(addr) = (val); \
} while (0)

static bool is_shadowed_mmio(unsigned int offset)
{
        bool ret = false;

        if ((offset == 0x2168) || /* BB current head register UDW */
            (offset == 0x2140) || /* BB current header register */
            (offset == 0x211c) || /* second BB header register UDW */
            (offset == 0x2114)) { /* second BB header register */
                ret = true;
        }
        return ret;
}

static inline bool is_force_nonpriv_mmio(unsigned int offset)
{
        return (offset >= 0x24d0 && offset < 0x2500);
}

static int force_nonpriv_reg_handler(struct parser_exec_state *s,
                                     unsigned int offset, unsigned int index)
{
        struct intel_gvt *gvt = s->vgpu->gvt;
        unsigned int data = cmd_val(s, index + 1);

        if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
                gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
                        offset, data);
                return -EINVAL;
        }
        return 0;
}

static int cmd_reg_handler(struct parser_exec_state *s,
        unsigned int offset, unsigned int index, char *cmd)
{
        struct intel_vgpu *vgpu = s->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;

        if (offset + 4 > gvt->device_info.mmio_size) {
                gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
                                cmd, offset);
                return -EINVAL;
        }

        if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
                gvt_vgpu_err("%s access to non-render register (%x)\n",
                                cmd, offset);
                return 0;
        }

        if (is_shadowed_mmio(offset)) {
                gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
                return 0;
        }

        if (is_force_nonpriv_mmio(offset) &&
            force_nonpriv_reg_handler(s, offset, index))
                return -EINVAL;

        if (offset == i915_mmio_reg_offset(DERRMR) ||
                offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
                /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }

        /* TODO: Update the global mask if this MMIO is a masked-MMIO */
        intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
}

#define cmd_reg(s, i) \
        (cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
        (cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
        (cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
        (cmd_val(s, i) & GENMASK(15, 0))

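/*
 * MI_LOAD_REGISTER_IMM payload: DWord 0 is the header, followed by
 * (register offset, value) pairs, hence the loop below starts at
 * DWord 1 and steps by 2.
 */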
static int cmd_handler_lri(struct parser_exec_state *s)
{
        int i, ret = 0;
        int cmd_len = cmd_length(s);
        struct intel_gvt *gvt = s->vgpu->gvt;

        for (i = 1; i < cmd_len; i += 2) {
                if (IS_BROADWELL(gvt->dev_priv) &&
                                (s->ring_id != RCS)) {
                        if (s->ring_id == BCS &&
                                        cmd_reg(s, i) ==
                                        i915_mmio_reg_offset(DERRMR))
                                ret |= 0;
                        else
                                ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
                }
                if (ret)
                        break;
                ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
        }
        return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
        int i, ret = 0;
        int cmd_len = cmd_length(s);

        for (i = 1; i < cmd_len; i += 2) {
                if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
                        ret |= ((cmd_reg_inhibit(s, i) ||
                                        (cmd_reg_inhibit(s, i + 1)))) ?
                                -EINVAL : 0;
                if (ret)
                        break;
                ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
                ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
        }
        return ret;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
                unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
        struct intel_gvt *gvt = s->vgpu->gvt;
        int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
        unsigned long gma;
        int i, ret = 0;
        int cmd_len = cmd_length(s);

        for (i = 1; i < cmd_len;) {
                if (IS_BROADWELL(gvt->dev_priv))
                        ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
                if (ret)
                        break;
                ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
                if (cmd_val(s, 0) & (1 << 22)) {
                        gma = cmd_gma(s, i + 1);
                        if (gmadr_bytes == 8)
                                gma |= (unsigned long)(cmd_gma_hi(s, i + 2)) << 32;
                        ret |= cmd_address_audit(s, gma, sizeof(u32), false);
                }
                i += gmadr_dw_number(s) + 1;
        }
        return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
        int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
        unsigned long gma;
        int i, ret = 0;
        int cmd_len = cmd_length(s);

        for (i = 1; i < cmd_len;) {
                ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
                if (cmd_val(s, 0) & (1 << 22)) {
                        gma = cmd_gma(s, i + 1);
                        if (gmadr_bytes == 8)
                                gma |= (unsigned long)(cmd_gma_hi(s, i + 2)) << 32;
                        ret |= cmd_address_audit(s, gma, sizeof(u32), false);
                }
                i += gmadr_dw_number(s) + 1;
        }
        return ret;
}

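/*
 * Per-ring mapping from interrupt-generating commands (PIPE_CONTROL
 * notify, MI_FLUSH_DW, MI_USER_INTERRUPT) to the virtual events that
 * the handlers below record in workload->pending_events.
 */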
struct cmd_interrupt_event {
        int pipe_control_notify;
        int mi_flush_dw;
        int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
        [RCS] = {
                .pipe_control_notify = RCS_PIPE_CONTROL,
                .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
                .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
        },
        [BCS] = {
                .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
                .mi_flush_dw = BCS_MI_FLUSH_DW,
                .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
        },
        [VCS] = {
                .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
                .mi_flush_dw = VCS_MI_FLUSH_DW,
                .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
        },
        [VCS2] = {
                .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
                .mi_flush_dw = VCS2_MI_FLUSH_DW,
                .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
        },
        [VECS] = {
                .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
                .mi_flush_dw = VECS_MI_FLUSH_DW,
                .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
        },
};

static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
        int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
        unsigned long gma;
        bool index_mode = false;
        unsigned int post_sync;
        int ret = 0;

        post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

        /* LRI post sync */
        if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
                ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
        /* post sync */
        else if (post_sync) {
                if (post_sync == 2)
                        ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
                else if (post_sync == 3)
                        ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
                else if (post_sync == 1) {
                        /* check ggtt */
                        if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
                                gma = cmd_val(s, 2) & GENMASK(31, 3);
                                if (gmadr_bytes == 8)
                                        gma |= (unsigned long)(cmd_gma_hi(s, 3)) << 32;
                                /* Store Data Index */
                                if (cmd_val(s, 1) & (1 << 21))
                                        index_mode = true;
                                ret |= cmd_address_audit(s, gma, sizeof(u64),
                                                index_mode);
                        }
                }
        }

        if (ret)
                return ret;

        if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
                set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
                                s->workload->pending_events);
        return 0;
}

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
        set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
                        s->workload->pending_events);
        return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
        return ip_gma_advance(s, cmd_length(s));
}

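/*
 * MI_BATCH_BUFFER_END: return either from a 2nd-level batch buffer to
 * the 1st-level one (restoring the saved address type), or from a batch
 * buffer back to the ring buffer at the saved return ip.
 */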
static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
        int ret;

        if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
                s->buf_type = BATCH_BUFFER_INSTRUCTION;
                ret = ip_gma_set(s, s->ret_ip_gma_bb);
                s->buf_addr_type = s->saved_buf_addr_type;
        } else {
                s->buf_type = RING_BUFFER_INSTRUCTION;
                s->buf_addr_type = GTT_BUFFER;
                if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
                        s->ret_ip_gma_ring -= s->ring_size;
                ret = ip_gma_set(s, s->ret_ip_gma_ring);
        }
        return ret;
}

struct mi_display_flip_command_info {
        int pipe;
        int plane;
        int event;
        i915_reg_t stride_reg;
        i915_reg_t ctrl_reg;
        i915_reg_t surf_reg;
        u64 stride_val;
        u64 tile_val;
        u64 surf_val;
        bool async_flip;
};

struct plane_code_mapping {
        int pipe;
        int plane;
        int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
        struct plane_code_mapping gen8_plane_code[] = {
                [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
                [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
                [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
                [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
                [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
                [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
        };
        u32 dword0, dword1, dword2;
        u32 v;

        dword0 = cmd_val(s, 0);
        dword1 = cmd_val(s, 1);
        dword2 = cmd_val(s, 2);

        v = (dword0 & GENMASK(21, 19)) >> 19;
        if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
                return -EINVAL;

        info->pipe = gen8_plane_code[v].pipe;
        info->plane = gen8_plane_code[v].plane;
        info->event = gen8_plane_code[v].event;
        info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
        info->tile_val = (dword1 & 0x1);
        info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
        info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

        if (info->plane == PLANE_A) {
                info->ctrl_reg = DSPCNTR(info->pipe);
                info->stride_reg = DSPSTRIDE(info->pipe);
                info->surf_reg = DSPSURF(info->pipe);
        } else if (info->plane == PLANE_B) {
                info->ctrl_reg = SPRCTL(info->pipe);
                info->stride_reg = SPRSTRIDE(info->pipe);
                info->surf_reg = SPRSURF(info->pipe);
        } else {
                WARN_ON(1);
                return -EINVAL;
        }
        return 0;
}

static int skl_decode_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
        struct intel_vgpu *vgpu = s->vgpu;
        u32 dword0 = cmd_val(s, 0);
        u32 dword1 = cmd_val(s, 1);
        u32 dword2 = cmd_val(s, 2);
        u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

        info->plane = PRIMARY_PLANE;

        switch (plane) {
        case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
                info->pipe = PIPE_A;
                info->event = PRIMARY_A_FLIP_DONE;
                break;
        case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
                info->pipe = PIPE_B;
                info->event = PRIMARY_B_FLIP_DONE;
                break;
        case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
                info->pipe = PIPE_C;
                info->event = PRIMARY_C_FLIP_DONE;
                break;

        case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
                info->pipe = PIPE_A;
                info->event = SPRITE_A_FLIP_DONE;
                info->plane = SPRITE_PLANE;
                break;
        case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
                info->pipe = PIPE_B;
                info->event = SPRITE_B_FLIP_DONE;
                info->plane = SPRITE_PLANE;
                break;
        case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
                info->pipe = PIPE_C;
                info->event = SPRITE_C_FLIP_DONE;
                info->plane = SPRITE_PLANE;
                break;

        default:
                gvt_vgpu_err("unknown plane code %d\n", plane);
                return -EINVAL;
        }

        info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
        info->tile_val = (dword1 & GENMASK(2, 0));
        info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
        info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

        info->ctrl_reg = DSPCNTR(info->pipe);
        info->stride_reg = DSPSTRIDE(info->pipe);
        info->surf_reg = DSPSURF(info->pipe);

        return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
        u32 stride, tile;

        if (!info->async_flip)
                return 0;

        if (IS_SKYLAKE(dev_priv)) {
                stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
        } else {
                stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
                                GENMASK(15, 6)) >> 6;
                tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
        }

        if (stride != info->stride_val)
                gvt_dbg_cmd("cannot change stride during async flip\n");

        if (tile != info->tile_val)
                gvt_dbg_cmd("cannot change tile during async flip\n");

        return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
                struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
        struct intel_vgpu *vgpu = s->vgpu;

        set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
        if (IS_SKYLAKE(dev_priv)) {
                set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
                              info->tile_val << 10);
        } else {
                set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
                              info->stride_val << 6);
                set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
                              info->tile_val << 10);
        }

        vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
        intel_vgpu_trigger_virtual_event(vgpu, info->event);
        return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
        if (IS_SKYLAKE(dev_priv))
                return skl_decode_mi_display_flip(s, info);

        return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

        if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
                return gen8_check_mi_display_flip(s, info);
        return -ENODEV;
}

static int update_plane_mmio_from_mi_display_flip(
                struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
{
        struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

        if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
                return gen8_update_plane_mmio_from_mi_display_flip(s, info);
        return -ENODEV;
}

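/*
 * MI_DISPLAY_FLIP is emulated: decode it, validate it, update the
 * virtual plane MMIO and trigger the flip-done event, then patch every
 * DWord of the command to MI_NOOP so only the emulated flip takes effect.
 */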
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
        struct mi_display_flip_command_info info;
        struct intel_vgpu *vgpu = s->vgpu;
        int ret;
        int i;
        int len = cmd_length(s);

        ret = decode_mi_display_flip(s, &info);
        if (ret) {
                gvt_vgpu_err("fail to decode MI display flip command\n");
                return ret;
        }

        ret = check_mi_display_flip(s, &info);
        if (ret) {
                gvt_vgpu_err("invalid MI display flip command\n");
                return ret;
        }

        ret = update_plane_mmio_from_mi_display_flip(s, &info);
        if (ret) {
                gvt_vgpu_err("fail to update plane mmio\n");
                return ret;
        }

        for (i = 0; i < len; i++)
                patch_value(s, cmd_ptr(s, i), MI_NOOP);
        return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
        return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
                        MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
                        MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
                        MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
                        MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
                        MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
        u32 cmd = cmd_val(s, 0);

        if (!is_wait_for_flip_pending(cmd))
                return 0;

        patch_value(s, cmd_ptr(s, 0), MI_NOOP);
        return 0;
}

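/*
 * Read the batch buffer start address from the command: a single DWord
 * for 4-byte addressing, or low + high DWords for 8-byte addressing.
 */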
1349 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1350 {
1351         unsigned long addr;
1352         unsigned long gma_high, gma_low;
1353         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1354
1355         if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
1356                 return INTEL_GVT_INVALID_ADDR;
1357
1358         gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1359         if (gmadr_bytes == 4) {
1360                 addr = gma_low;
1361         } else {
1362                 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1363                 addr = (((unsigned long)gma_high) << 32) | gma_low;
1364         }
1365         return addr;
1366 }
1367
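/*
 * Audit a guest graphics memory address found in a command: reject
 * operands larger than the maximum surface size and, for direct
 * addresses, anything outside the vGPU's aperture/hidden GM ranges.
 * In index mode the operand is an index into a page of qwords instead.
 */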
1368 static inline int cmd_address_audit(struct parser_exec_state *s,
1369                 unsigned long guest_gma, int op_size, bool index_mode)
1370 {
1371         struct intel_vgpu *vgpu = s->vgpu;
1372         u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1373         int i;
1374         int ret;
1375
1376         if (op_size > max_surface_size) {
1377                 gvt_vgpu_err("command address audit fail name %s\n",
1378                         s->info->name);
1379                 return -EINVAL;
1380         }
1381
1382         if (index_mode) {
1383                 if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
1384                         ret = -EINVAL;
1385                         goto err;
1386                 }
1387         } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
1388                         (!vgpu_gmadr_is_valid(s->vgpu,
1389                                               guest_gma + op_size - 1))) {
1390                 ret = -EINVAL;
1391                 goto err;
1392         }
1393         return 0;
1394 err:
1395         gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1396                         s->info->name, guest_gma, op_size);
1397
1398         pr_err("cmd dump: ");
1399         for (i = 0; i < cmd_length(s); i++) {
1400                 if (!(i % 4))
1401                         pr_err("\n%08x ", cmd_val(s, i));
1402                 else
1403                         pr_err("%08x ", cmd_val(s, i));
1404         }
1405         pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1406                         vgpu->id,
1407                         vgpu_aperture_gmadr_base(vgpu),
1408                         vgpu_aperture_gmadr_end(vgpu),
1409                         vgpu_hidden_gmadr_base(vgpu),
1410                         vgpu_hidden_gmadr_end(vgpu));
1411         return ret;
1412 }
1413
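/*
 * Audit the destination of MI_STORE_DATA_IMM. Only global GTT
 * destinations (bit 22 set) are audited; PPGTT destinations are left
 * to the guest's own per-process address space.
 */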
1414 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1415 {
1416         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1417         int op_size = (cmd_length(s) - 3) * sizeof(u32);
1418         int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1419         unsigned long gma, gma_low, gma_high;
1420         int ret = 0;
1421
1422         /* check ppgtt */
1423         if (!(cmd_val(s, 0) & (1 << 22)))
1424                 return 0;
1425
1426         gma = cmd_val(s, 2) & GENMASK(31, 2);
1427
1428         if (gmadr_bytes == 8) {
1429                 gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1430                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1431                 gma = (gma_high << 32) | gma_low;
1432                 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1433         }
1434         ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1435         return ret;
1436 }
1437
1438 static inline int unexpected_cmd(struct parser_exec_state *s)
1439 {
1440         struct intel_vgpu *vgpu = s->vgpu;
1441
1442         gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1443
1444         return -EINVAL;
1445 }
1446
1447 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1448 {
1449         return unexpected_cmd(s);
1450 }
1451
1452 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1453 {
1454         return unexpected_cmd(s);
1455 }
1456
1457 static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1458 {
1459         return unexpected_cmd(s);
1460 }
1461
1462 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1463 {
1464         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1465         int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1466                         sizeof(u32);
1467         unsigned long gma, gma_high;
1468         int ret = 0;
1469
1470         if (!(cmd_val(s, 0) & (1 << 22)))
1471                 return ret;
1472
1473         gma = cmd_val(s, 1) & GENMASK(31, 2);
1474         if (gmadr_bytes == 8) {
1475                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1476                 gma = (gma_high << 32) | gma;
1477         }
1478         ret = cmd_address_audit(s, gma, op_size, false);
1479         return ret;
1480 }
1481
1482 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1483 {
1484         return unexpected_cmd(s);
1485 }
1486
1487 static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1488 {
1489         return unexpected_cmd(s);
1490 }
1491
1492 static int cmd_handler_mi_conditional_batch_buffer_end(
1493                 struct parser_exec_state *s)
1494 {
1495         return unexpected_cmd(s);
1496 }
1497
1498 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1499 {
1500         return unexpected_cmd(s);
1501 }
1502
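/*
 * For MI_FLUSH_DW, audit the post-sync write address when a post-sync
 * operation targets a global GTT address, and record a pending
 * MI_FLUSH_DW interrupt when the notify bit is set.
 */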
1503 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1504 {
1505         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1506         unsigned long gma;
1507         bool index_mode = false;
1508         int ret = 0;
1509
1510         /* Check post-sync op and GGTT destination address type */
1511         if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1512                 gma = cmd_val(s, 1) & GENMASK(31, 3);
1513                 if (gmadr_bytes == 8)
1514                         gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1515                 /* Store Data Index */
1516                 if (cmd_val(s, 0) & (1 << 21))
1517                         index_mode = true;
1518                 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1519         }
1520         /* Check notify bit */
1521         if ((cmd_val(s, 0) & (1 << 8)))
1522                 set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
1523                                 s->workload->pending_events);
1524         return ret;
1525 }
1526
1527 static void addr_type_update_snb(struct parser_exec_state *s)
1528 {
1529         if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1530                         (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1531                 s->buf_addr_type = PPGTT_BUFFER;
1532         }
1533 }
1534
1535
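/*
 * Copy [gma, end_gma) from guest graphics memory into the host buffer
 * at va, translating gma to gpa one page at a time since the range may
 * span multiple, physically discontiguous guest pages.
 */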
1536 static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1537                 unsigned long gma, unsigned long end_gma, void *va)
1538 {
1539         unsigned long copy_len, offset;
1540         unsigned long len = 0;
1541         unsigned long gpa;
1542
1543         while (gma != end_gma) {
1544                 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1545                 if (gpa == INTEL_GVT_INVALID_ADDR) {
1546                         gvt_vgpu_err("invalid gma address: %lx\n", gma);
1547                         return -EFAULT;
1548                 }
1549
1550                 offset = gma & (GTT_PAGE_SIZE - 1);
1551
1552                 copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
1553                         GTT_PAGE_SIZE - offset : end_gma - gma;
1554
1555                 intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1556
1557                 len += copy_len;
1558                 gma += copy_len;
1559         }
1560         return 0;
1561 }
1562
1563
1564 /*
1565  * Check whether a batch buffer needs to be scanned. Currently
1566  * the only criterion is privilege.
1567  */
1568 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1569 {
1570         struct intel_gvt *gvt = s->vgpu->gvt;
1571
1572         if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
1573                 /* BDW decides privilege based on address space */
1574                 if (cmd_val(s, 0) & (1 << 8))
1575                         return 0;
1576         }
1577         return 1;
1578 }
1579
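/*
 * Walk the guest batch buffer command by command to determine how many
 * bytes need to be shadowed. The walk stops at MI_BATCH_BUFFER_END or
 * at a chained (non-second-level) MI_BATCH_BUFFER_START, both of which
 * end the current buffer. Returns the batch buffer size in bytes, or a
 * negative errno on an unknown command.
 */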
1580 static int find_bb_size(struct parser_exec_state *s)
1581 {
1582         unsigned long gma = 0;
1583         struct cmd_info *info;
1584         uint32_t bb_size = 0;
1585         uint32_t cmd_len = 0;
1586         bool met_bb_end = false;
1587         struct intel_vgpu *vgpu = s->vgpu;
1588         u32 cmd;
1589
1590         /* get the start gm address of the batch buffer */
1591         gma = get_gma_bb_from_cmd(s, 1);
1592         cmd = cmd_val(s, 0);
1593
1594         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1595         if (info == NULL) {
1596                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1597                                 cmd, get_opcode(cmd, s->ring_id));
1598                 return -EINVAL;
1599         }
1600         do {
1601                 copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1602                                 gma, gma + 4, &cmd);
1603                 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1604                 if (info == NULL) {
1605                         gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1606                                 cmd, get_opcode(cmd, s->ring_id));
1607                         return -EINVAL;
1608                 }
1609
1610                 if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1611                         met_bb_end = true;
1612                 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1613                         if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
1614                                 /* chained batch buffer */
1615                                 met_bb_end = true;
1616                         }
1617                 }
1618                 cmd_len = get_cmd_length(info, cmd) << 2;
1619                 bb_size += cmd_len;
1620                 gma += cmd_len;
1621
1622         } while (!met_bb_end);
1623
1624         return bb_size;
1625 }
1626
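/*
 * Shadow a guest batch buffer: allocate a GEM object, copy the guest
 * contents into it, and re-point the parser's ip_va at the shadow copy
 * so the scan continues on host-owned memory while ip_gma still tracks
 * the original guest buffer.
 */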
1627 static int perform_bb_shadow(struct parser_exec_state *s)
1628 {
1629         struct intel_shadow_bb_entry *entry_obj;
1630         struct intel_vgpu *vgpu = s->vgpu;
1631         unsigned long gma = 0;
1632         int bb_size;
1633         void *dst = NULL;
1634         int ret = 0;
1635
1636         /* get the start gm address of the batch buffer */
1637         gma = get_gma_bb_from_cmd(s, 1);
1638
1639         /* get the size of the batch buffer */
1640         bb_size = find_bb_size(s);
        if (bb_size < 0)
                return bb_size;
1641
1642         /* allocate shadow batch buffer */
1643         entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
1644         if (entry_obj == NULL)
1645                 return -ENOMEM;
1646
1647         entry_obj->obj =
1648                 i915_gem_object_create(s->vgpu->gvt->dev_priv,
1649                                        roundup(bb_size, PAGE_SIZE));
1650         if (IS_ERR(entry_obj->obj)) {
1651                 ret = PTR_ERR(entry_obj->obj);
1652                 goto free_entry;
1653         }
1654         entry_obj->len = bb_size;
1655         INIT_LIST_HEAD(&entry_obj->list);
1656
1657         dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
1658         if (IS_ERR(dst)) {
1659                 ret = PTR_ERR(dst);
1660                 goto put_obj;
1661         }
1662
1663         ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1664         if (ret) {
1665                 gvt_vgpu_err("failed to set shadow batch to CPU\n");
1666                 goto unmap_src;
1667         }
1668
1669         entry_obj->va = dst;
1670         entry_obj->bb_start_cmd_va = s->ip_va;
1671
1672         /* copy batch buffer to shadow batch buffer */
1673         ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1674                               gma, gma + bb_size,
1675                               dst);
1676         if (ret) {
1677                 gvt_vgpu_err("fail to copy guest ring buffer\n");
1678                 goto unmap_src;
1679         }
1680
1681         list_add(&entry_obj->list, &s->workload->shadow_bb);
1682         /*
1683          * ip_va saves the virtual address of the shadow batch buffer, while
1684          * ip_gma saves the graphics address of the original batch buffer.
1685          * As the shadow batch buffer is just a copy of the original one,
1686          * it is correct to use the shadow batch buffer's va and the
1687          * original batch buffer's gma in pair. After all, we don't want
1688          * to pin the shadow buffer here (too early).
1689          */
1690         s->ip_va = dst;
1691         s->ip_gma = gma;
1692
1693         return 0;
1694
1695 unmap_src:
1696         i915_gem_object_unpin_map(entry_obj->obj);
1697 put_obj:
1698         i915_gem_object_put(entry_obj->obj);
1699 free_entry:
1700         kfree(entry_obj);
1701         return ret;
1702 }
1703
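/*
 * Handle MI_BATCH_BUFFER_START: reject nesting beyond a second-level
 * batch buffer, record the return ip for the buffer being entered, and
 * either shadow the target buffer or, if it needs no scanning, emulate
 * an MI_BATCH_BUFFER_END so the return path stays consistent.
 */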
1704 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1705 {
1706         bool second_level;
1707         int ret = 0;
1708         struct intel_vgpu *vgpu = s->vgpu;
1709
1710         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1711                 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1712                 return -EINVAL;
1713         }
1714
1715         second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1716         if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1717                 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1718                 return -EINVAL;
1719         }
1720
1721         s->saved_buf_addr_type = s->buf_addr_type;
1722         addr_type_update_snb(s);
1723         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1724                 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1725                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1726         } else if (second_level) {
1727                 s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1728                 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1729                 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1730         }
1731
1732         if (batch_buffer_needs_scan(s)) {
1733                 ret = perform_bb_shadow(s);
1734                 if (ret < 0)
1735                         gvt_vgpu_err("invalid shadow batch buffer\n");
1736         } else {
1737                 /* emulate a batch buffer end so the return is handled correctly */
1738                 ret = cmd_handler_mi_batch_buffer_end(s);
1739                 if (ret < 0)
1740                         return ret;
1741         }
1742
1743         return ret;
1744 }
1745
1746 static struct cmd_info cmd_info[] = {
1747         {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1748
1749         {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1750                 0, 1, NULL},
1751
1752         {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1753                 0, 1, cmd_handler_mi_user_interrupt},
1754
1755         {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1756                 D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1757
1758         {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1759
1760         {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1761                 NULL},
1762
1763         {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1764                 NULL},
1765
1766         {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1767                 NULL},
1768
1769         {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1770                 NULL},
1771
1772         {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1773                 D_ALL, 0, 1, NULL},
1774
1775         {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1776                 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1777                 cmd_handler_mi_batch_buffer_end},
1778
1779         {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1780                 0, 1, NULL},
1781
1782         {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1783                 NULL},
1784
1785         {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1786                 D_ALL, 0, 1, NULL},
1787
1788         {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1789                 NULL},
1790
1791         {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1792                 NULL},
1793
1794         {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1795                 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1796
1797         {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1798                 0, 8, NULL},
1799
1800         {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1801
1802         {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1803
1804         {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1805                 D_BDW_PLUS, 0, 8, NULL},
1806
1807         {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1808                 ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1809
1810         {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1811                 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1812
1813         {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1814                 0, 8, cmd_handler_mi_store_data_index},
1815
1816         {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1817                 D_ALL, 0, 8, cmd_handler_lri},
1818
1819         {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1820                 cmd_handler_mi_update_gtt},
1821
1822         {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1823                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1824
1825         {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1826                 cmd_handler_mi_flush_dw},
1827
1828         {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1829                 10, cmd_handler_mi_clflush},
1830
1831         {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1832                 D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1833
1834         {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1835                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1836
1837         {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1838                 D_ALL, 0, 8, cmd_handler_lrr},
1839
1840         {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1841                 D_ALL, 0, 8, NULL},
1842
1843         {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1844                 ADDR_FIX_1(2), 8, NULL},
1845
1846         {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1847                 ADDR_FIX_1(2), 8, NULL},
1848
1849         {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1850                 8, cmd_handler_mi_op_2e},
1851
1852         {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1853                 8, cmd_handler_mi_op_2f},
1854
1855         {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1856                 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1857                 cmd_handler_mi_batch_buffer_start},
1858
1859         {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1860                 F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1861                 cmd_handler_mi_conditional_batch_buffer_end},
1862
1863         {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1864                 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1865
1866         {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1867                 ADDR_FIX_2(4, 7), 8, NULL},
1868
1869         {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1870                 0, 8, NULL},
1871
1872         {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1873                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1874
1875         {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1876
1877         {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1878                 0, 8, NULL},
1879
1880         {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1881                 ADDR_FIX_1(3), 8, NULL},
1882
1883         {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1884                 D_ALL, 0, 8, NULL},
1885
1886         {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1887                 ADDR_FIX_1(4), 8, NULL},
1888
1889         {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1890                 ADDR_FIX_2(4, 5), 8, NULL},
1891
1892         {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1893                 ADDR_FIX_1(4), 8, NULL},
1894
1895         {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1896                 ADDR_FIX_2(4, 7), 8, NULL},
1897
1898         {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1899                 D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1900
1901         {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1902
1903         {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1904                 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1905
1906         {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1907                 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1908
1909         {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1910                 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1911                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1912
1913         {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1914                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1915
1916         {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1917                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1918
1919         {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1920                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1921
1922         {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1923                 D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1924
1925         {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1926                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1927
1928         {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1929                 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1930                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1931
1932         {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1933                 ADDR_FIX_2(4, 5), 8, NULL},
1934
1935         {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1936                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1937
1938         {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1939                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1940                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1941
1942         {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1943                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1944                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1945
1946         {"3DSTATE_BLEND_STATE_POINTERS",
1947                 OP_3DSTATE_BLEND_STATE_POINTERS,
1948                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1949
1950         {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1951                 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1952                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1953
1954         {"3DSTATE_BINDING_TABLE_POINTERS_VS",
1955                 OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1956                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1957
1958         {"3DSTATE_BINDING_TABLE_POINTERS_HS",
1959                 OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1960                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1961
1962         {"3DSTATE_BINDING_TABLE_POINTERS_DS",
1963                 OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1964                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1965
1966         {"3DSTATE_BINDING_TABLE_POINTERS_GS",
1967                 OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
1968                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1969
1970         {"3DSTATE_BINDING_TABLE_POINTERS_PS",
1971                 OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
1972                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1973
1974         {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1975                 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
1976                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1977
1978         {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
1979                 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
1980                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1981
1982         {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
1983                 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
1984                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1985
1986         {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
1987                 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
1988                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1989
1990         {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
1991                 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
1992                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1993
1994         {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
1995                 0, 8, NULL},
1996
1997         {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
1998                 0, 8, NULL},
1999
2000         {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2001                 0, 8, NULL},
2002
2003         {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2004                 0, 8, NULL},
2005
2006         {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2007                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2008
2009         {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2010                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2011
2012         {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2013                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2014
2015         {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2016                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2017
2018         {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2019                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2020
2021         {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2022                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2023
2024         {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2025                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2026
2027         {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2028                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2029
2030         {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2031                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2032
2033         {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2034                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2035
2036         {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2037                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2038
2039         {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2040                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2041
2042         {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2043                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2044
2045         {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2046                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2047
2048         {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2049                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2050
2051         {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2052                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2053
2054         {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2055                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2056
2057         {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2058                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2059
2060         {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2061                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2062
2063         {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2064                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2065
2066         {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2067                 D_BDW_PLUS, 0, 8, NULL},
2068
2069         {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2070                 NULL},
2071
2072         {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2073                 D_BDW_PLUS, 0, 8, NULL},
2074
2075         {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2076                 D_BDW_PLUS, 0, 8, NULL},
2077
2078         {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2079                 8, NULL},
2080
2081         {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2082                 R_RCS, D_BDW_PLUS, 0, 8, NULL},
2083
2084         {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2085                 8, NULL},
2086
2087         {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2088                 NULL},
2089
2090         {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2091                 NULL},
2092
2093         {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2094                 NULL},
2095
2096         {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2097                 D_BDW_PLUS, 0, 8, NULL},
2098
2099         {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2100                 R_RCS, D_ALL, 0, 8, NULL},
2101
2102         {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2103                 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2104
2105         {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2106                 R_RCS, D_ALL, 0, 1, NULL},
2107
2108         {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2109
2110         {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2111                 R_RCS, D_ALL, 0, 8, NULL},
2112
2113         {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2114                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2115
2116         {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2117
2118         {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2119
2120         {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2121
2122         {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2123                 D_BDW_PLUS, 0, 8, NULL},
2124
2125         {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2126                 D_BDW_PLUS, 0, 8, NULL},
2127
2128         {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2129                 D_ALL, 0, 8, NULL},
2130
2131         {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2132                 D_BDW_PLUS, 0, 8, NULL},
2133
2134         {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2135                 D_BDW_PLUS, 0, 8, NULL},
2136
2137         {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2138
2139         {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2140
2141         {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2142
2143         {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2144                 D_ALL, 0, 8, NULL},
2145
2146         {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2147
2148         {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2149
2150         {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2151                 R_RCS, D_ALL, 0, 8, NULL},
2152
2153         {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2154                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2155
2156         {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2157                 0, 8, NULL},
2158
2159         {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2160                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2161
2162         {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2163                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2164
2165         {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2166                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2167
2168         {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2169                 D_ALL, 0, 8, NULL},
2170
2171         {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2172                 D_ALL, 0, 8, NULL},
2173
2174         {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2175                 D_ALL, 0, 8, NULL},
2176
2177         {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2178                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2179
2180         {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2181                 D_BDW_PLUS, 0, 8, NULL},
2182
2183         {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2184                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2185
2186         {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2187                 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2188
2189         {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2190                 R_RCS, D_ALL, 0, 8, NULL},
2191
2192         {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2193                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2194
2195         {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2196                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2197
2198         {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2199                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2200
2201         {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2202                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2203
2204         {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2205                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2206
2207         {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2208                 R_RCS, D_ALL, 0, 8, NULL},
2209
2210         {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2211                 D_ALL, 0, 9, NULL},
2212
2213         {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2214                 ADDR_FIX_2(2, 4), 8, NULL},
2215
2216         {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2217                 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2218                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2219
2220         {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2221                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2222
2223         {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2224                 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2225                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2226
2227         {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2228                 D_BDW_PLUS, 0, 8, NULL},
2229
2230         {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2231                 ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2232
2233         {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2234
2235         {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2236                 1, NULL},
2237
2238         {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2239                 ADDR_FIX_1(1), 8, NULL},
2240
2241         {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2242
2243         {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2244                 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2245
2246         {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2247                 ADDR_FIX_1(1), 8, NULL},
2248
2249         {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2250
2251         {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2252
2253         {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2254                 0, 8, NULL},
2255
2256         {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2257                 D_SKL_PLUS, 0, 8, NULL},
2258
2259         {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2260                 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2261
2262         {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2263                 0, 16, NULL},
2264
2265         {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2266                 0, 16, NULL},
2267
2268         {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2269
2270         {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2271                 0, 16, NULL},
2272
2273         {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2274                 0, 16, NULL},
2275
2276         {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2277                 0, 16, NULL},
2278
2279         {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2280                 0, 8, NULL},
2281
2282         {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2283                 NULL},
2284
2285         {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2286                 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2287
2288         {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2289                 R_VCS, D_ALL, 0, 12, NULL},
2290
2291         {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2292                 R_VCS, D_ALL, 0, 12, NULL},
2293
2294         {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2295                 R_VCS, D_BDW_PLUS, 0, 12, NULL},
2296
2297         {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2298                 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2299
2300         {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2301                 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2302
2303         {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2304
2305         {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2306                 R_VCS, D_ALL, 0, 12, NULL},
2307
2308         {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2309                 R_VCS, D_ALL, 0, 12, NULL},
2310
2311         {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2312                 R_VCS, D_ALL, 0, 12, NULL},
2313
2314         {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2315                 R_VCS, D_ALL, 0, 12, NULL},
2316
2317         {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2318                 R_VCS, D_ALL, 0, 12, NULL},
2319
2320         {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2321                 R_VCS, D_ALL, 0, 12, NULL},
2322
2323         {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2324                 R_VCS, D_ALL, 0, 6, NULL},
2325
2326         {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2327                 R_VCS, D_ALL, 0, 12, NULL},
2328
2329         {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2330                 R_VCS, D_ALL, 0, 12, NULL},
2331
2332         {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2333                 R_VCS, D_ALL, 0, 12, NULL},
2334
2335         {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2336                 R_VCS, D_ALL, 0, 12, NULL},
2337
2338         {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2339                 R_VCS, D_ALL, 0, 12, NULL},
2340
2341         {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2342                 R_VCS, D_ALL, 0, 12, NULL},
2343
2344         {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2345                 R_VCS, D_ALL, 0, 12, NULL},
2346         {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2347                 R_VCS, D_ALL, 0, 12, NULL},
2348
2349         {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2350                 R_VCS, D_ALL, 0, 12, NULL},
2351
2352         {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2353                 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2354
2355         {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2356                 R_VCS, D_ALL, 0, 12, NULL},
2357
2358         {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2359                 R_VCS, D_ALL, 0, 12, NULL},
2360
2361         {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2362                 R_VCS, D_ALL, 0, 12, NULL},
2363
2364         {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2365                 R_VCS, D_ALL, 0, 12, NULL},
2366
2367         {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2368                 R_VCS, D_ALL, 0, 12, NULL},
2369
2370         {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2371                 R_VCS, D_ALL, 0, 12, NULL},
2372
2373         {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2374                 R_VCS, D_ALL, 0, 12, NULL},
2375
2376         {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2377                 R_VCS, D_ALL, 0, 12, NULL},
2378
2379         {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2380                 R_VCS, D_ALL, 0, 12, NULL},
2381
2382         {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2383                 R_VCS, D_ALL, 0, 12, NULL},
2384
2385         {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2386                 R_VCS, D_ALL, 0, 12, NULL},
2387
2388         {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2389                 0, 16, NULL},
2390
2391         {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2392
2393         {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2394
2395         {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2396                 R_VCS, D_ALL, 0, 12, NULL},
2397
2398         {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2399                 R_VCS, D_ALL, 0, 12, NULL},
2400
2401         {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2402                 R_VCS, D_ALL, 0, 12, NULL},
2403
2404         {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2405
2406         {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2407                 0, 12, NULL},
2408
2409         {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2410                 0, 20, NULL},
2411 };
2412
2413 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2414 {
2415         hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2416 }
2417
2418 #define GVT_MAX_CMD_LENGTH     20  /* In Dword */
2419
2420 static void trace_cs_command(struct parser_exec_state *s,
2421                 cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
2422 {
2423         /* This buffer is used by ftrace to store all commands copied from
2424          * guest gma space. Sometimes commands can cross pages, which should
2425          * not be handled in the ftrace logic. So this is just used as a
2426          * 'bounce buffer'.
2427          */
2428         u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
2429         int i;
2430         u32 cmd_len = cmd_length(s);
2431         /* The chosen value of GVT_MAX_CMD_LENGTH is based on the
2432          * following two considerations:
2433          * 1) From observation, most common ring commands are not that long.
2434          *    But there are exceptions. So it indeed makes sense to observe
2435          *    longer commands.
2436          * 2) From the performance and debugging point of view, dumping the
2437          *    full contents of every command is not necessary.
2438          * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
2439          * the future for performance reasons.
2440          */
2441         if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
2442                 gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
2443                 cmd_len = GVT_MAX_CMD_LENGTH;
2444         }
2445
2446         for (i = 0; i < cmd_len; i++)
2447                 cmd_trace_buf[i] = cmd_val(s, i);
2448
2449         trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
2450                         cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
2451                         cost_pre_cmd_handler, cost_cmd_handler);
2452 }
2453
2454 /* call the cmd handler, and advance ip */
2455 static int cmd_parser_exec(struct parser_exec_state *s)
2456 {
2457         struct cmd_info *info;
2458         u32 cmd;
2459         int ret = 0;
2460         cycles_t t0, t1, t2;
2461         struct parser_exec_state s_before_advance_custom;
2462         struct intel_vgpu *vgpu = s->vgpu;
2463
2464         t0 = get_cycles();
2465
2466         cmd = cmd_val(s, 0);
2467
2468         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2469         if (info == NULL) {
2470                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2471                                 cmd, get_opcode(cmd, s->ring_id));
2472                 return -EINVAL;
2473         }
2474
2475         gvt_dbg_cmd("%s\n", info->name);
2476
2477         s->info = info;
2478
2479         t1 = get_cycles();
2480
2481         memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
2482
2483         if (info->handler) {
2484                 ret = info->handler(s);
2485                 if (ret < 0) {
2486                         gvt_vgpu_err("%s handler error\n", info->name);
2487                         return ret;
2488                 }
2489         }
2490         t2 = get_cycles();
2491
2492         trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
2493
2494         if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2495                 ret = cmd_advance_default(s);
2496                 if (ret) {
2497                         gvt_vgpu_err("%s IP advance error\n", info->name);
2498                         return ret;
2499                 }
2500         }
2501         return 0;
2502 }
2503
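/*
 * The ring buffer may wrap: if tail >= head the valid window is
 * [head, tail]; otherwise the valid window wraps around the end of
 * the ring and only (tail, head) is out of range.
 */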
2504 static inline bool gma_out_of_range(unsigned long gma,
2505                 unsigned long gma_head, unsigned int gma_tail)
2506 {
2507         if (gma_tail >= gma_head)
2508                 return (gma < gma_head) || (gma > gma_tail);
2509         else
2510                 return (gma > gma_tail) && (gma < gma_head);
2511 }
2512
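/*
 * Scan commands from head to tail. For ring-buffer instructions, verify
 * that the parser's ip stays inside the ring and inside the [head, tail]
 * window before each command is parsed.
 */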
2513 static int command_scan(struct parser_exec_state *s,
2514                 unsigned long rb_head, unsigned long rb_tail,
2515                 unsigned long rb_start, unsigned long rb_len)
2516 {
2517
2518         unsigned long gma_head, gma_tail, gma_bottom;
2519         int ret = 0;
2520         struct intel_vgpu *vgpu = s->vgpu;
2521
2522         gma_head = rb_start + rb_head;
2523         gma_tail = rb_start + rb_tail;
2524         gma_bottom = rb_start + rb_len;
2525
2526         gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
2527
2528         while (s->ip_gma != gma_tail) {
2529                 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2530                         if (!(s->ip_gma >= rb_start) ||
2531                                 !(s->ip_gma < gma_bottom)) {
2532                                 gvt_vgpu_err("ip_gma %lx out of ring scope."
2533                                         "(base:0x%lx, bottom: 0x%lx)\n",
2534                                         s->ip_gma, rb_start,
2535                                         gma_bottom);
2536                                 parser_exec_state_dump(s);
2537                                 return -EINVAL;
2538                         }
2539                         if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2540                                 gvt_vgpu_err("ip_gma %lx out of range."
2541                                         "base 0x%lx head 0x%lx tail 0x%lx\n",
2542                                         s->ip_gma, rb_start,
2543                                         rb_head, rb_tail);
2544                                 parser_exec_state_dump(s);
2545                                 break;
2546                         }
2547                 }
2548                 ret = cmd_parser_exec(s);
2549                 if (ret) {
2550                         gvt_vgpu_err("cmd parser error\n");
2551                         parser_exec_state_dump(s);
2552                         break;
2553                 }
2554         }
2555
2556         gvt_dbg_cmd("scan_end\n");
2557
2558         return ret;
2559 }
2560
2561 static int scan_workload(struct intel_vgpu_workload *workload)
2562 {
2563         unsigned long gma_head, gma_tail, gma_bottom;
2564         struct parser_exec_state s;
2565         int ret = 0;
2566
2567         /* ring base is page aligned */
2568         if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
2569                 return -EINVAL;
2570
2571         gma_head = workload->rb_start + workload->rb_head;
2572         gma_tail = workload->rb_start + workload->rb_tail;
2573         gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2574
2575         s.buf_type = RING_BUFFER_INSTRUCTION;
2576         s.buf_addr_type = GTT_BUFFER;
2577         s.vgpu = workload->vgpu;
2578         s.ring_id = workload->ring_id;
2579         s.ring_start = workload->rb_start;
2580         s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2581         s.ring_head = gma_head;
2582         s.ring_tail = gma_tail;
2583         s.rb_va = workload->shadow_ring_buffer_va;
2584         s.workload = workload;
2585
2586         if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2587                 gma_head == gma_tail)
2588                 return 0;
2589
2590         ret = ip_gma_set(&s, gma_head);
2591         if (ret)
2592                 goto out;
2593
2594         ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2595                 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2596
2597 out:
2598         return ret;
2599 }
2600
2601 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2602 {
2603
2604         unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2605         struct parser_exec_state s;
2606         int ret = 0;
2607
2608         /* ring base is page aligned */
2609         if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
2610                 return -EINVAL;
2611
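        /*
         * The shadowed indirect ctx is followed by a 3-dword
         * MI_BATCH_BUFFER_START appended by combine_wa_ctx(), hence
         * the extra 3 dwords in the tail.
         */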
2612         ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2613         ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2614                         PAGE_SIZE);
2615         gma_head = wa_ctx->indirect_ctx.guest_gma;
2616         gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2617         gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2618
2619         s.buf_type = RING_BUFFER_INSTRUCTION;
2620         s.buf_addr_type = GTT_BUFFER;
2621         s.vgpu = wa_ctx->workload->vgpu;
2622         s.ring_id = wa_ctx->workload->ring_id;
2623         s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2624         s.ring_size = ring_size;
2625         s.ring_head = gma_head;
2626         s.ring_tail = gma_tail;
2627         s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2628         s.workload = wa_ctx->workload;
2629
2630         ret = ip_gma_set(&s, gma_head);
2631         if (ret)
2632                 goto out;
2633
2634         ret = command_scan(&s, 0, ring_tail,
2635                 wa_ctx->indirect_ctx.guest_gma, ring_size);
2636 out:
2637         return ret;
2638 }
2639
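/*
 * Copy the guest ring buffer contents between head and tail into space
 * reserved on the shadow ring. If the guest contents wrap past the top
 * of the ring, the copy is done in two pieces.
 */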
2640 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2641 {
2642         struct intel_vgpu *vgpu = workload->vgpu;
2643         int ring_id = workload->ring_id;
2644         struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
2645         struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
2646         unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2647         unsigned int copy_len = 0;
2648         int ret;
2649
2650         guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2651
2652         /* calculate workload ring buffer size */
2653         workload->rb_len = (workload->rb_tail + guest_rb_size -
2654                         workload->rb_head) % guest_rb_size;
2655
2656         gma_head = workload->rb_start + workload->rb_head;
2657         gma_tail = workload->rb_start + workload->rb_tail;
2658         gma_top = workload->rb_start + guest_rb_size;
2659
2660         /* allocate shadow ring buffer */
2661         ret = intel_ring_begin(workload->req, workload->rb_len / 4);
2662         if (ret)
2663                 return ret;
2664
2665         /* get shadow ring buffer va */
2666         workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
2667
2668         /* head > tail --> copy head <-> top */
2669         if (gma_head > gma_tail) {
2670                 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2671                                 gma_head, gma_top,
2672                                 workload->shadow_ring_buffer_va);
2673                 if (ret) {
2674                         gvt_vgpu_err("fail to copy guest ring buffer\n");
2675                         return ret;
2676                 }
2677                 copy_len = gma_top - gma_head;
2678                 gma_head = workload->rb_start;
2679         }
2680
2681         /* copy head or start <-> tail */
2682         ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2683                         gma_head, gma_tail,
2684                         workload->shadow_ring_buffer_va + copy_len);
2685         if (ret) {
2686                 gvt_vgpu_err("fail to copy guest ring buffer\n");
2687                 return ret;
2688         }
2689         ring->tail += workload->rb_len;
2690         intel_ring_advance(ring);
2691         return 0;
2692 }
2693
2694 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2695 {
2696         int ret;
2697         struct intel_vgpu *vgpu = workload->vgpu;
2698
2699         ret = shadow_workload_ring_buffer(workload);
2700         if (ret) {
2701                 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2702                 return ret;
2703         }
2704
2705         ret = scan_workload(workload);
2706         if (ret) {
2707                 gvt_vgpu_err("scan workload error\n");
2708                 return ret;
2709         }
2710         return 0;
2711 }
2712
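/*
 * Shadow the workaround indirect context: allocate a GEM object large
 * enough for the context plus a trailing cacheline, and copy the guest
 * indirect ctx into it for scanning.
 */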
2713 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2714 {
2715         int ctx_size = wa_ctx->indirect_ctx.size;
2716         unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2717         struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2718         struct drm_i915_gem_object *obj;
2719         int ret = 0;
2720         void *map;
2721
2722         obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
2723                                      roundup(ctx_size + CACHELINE_BYTES,
2724                                              PAGE_SIZE));
2725         if (IS_ERR(obj))
2726                 return PTR_ERR(obj);
2727
2728         /* get the va of the shadow indirect ctx buffer */
2729         map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2730         if (IS_ERR(map)) {
2731                 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2732                 ret = PTR_ERR(map);
2733                 goto put_obj;
2734         }
2735
2736         ret = i915_gem_object_set_to_cpu_domain(obj, false);
2737         if (ret) {
2738                 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2739                 goto unmap_src;
2740         }
2741
2742         ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
2743                                 wa_ctx->workload->vgpu->gtt.ggtt_mm,
2744                                 guest_gma, guest_gma + ctx_size,
2745                                 map);
2746         if (ret) {
2747                 gvt_vgpu_err("failed to copy guest indirect ctx\n");
2748                 goto unmap_src;
2749         }
2750
2751         wa_ctx->indirect_ctx.obj = obj;
2752         wa_ctx->indirect_ctx.shadow_va = map;
2753         return 0;
2754
2755 unmap_src:
2756         i915_gem_object_unpin_map(obj);
2757 put_obj:
2758         i915_gem_object_put(obj); /* indirect_ctx.obj is not set on this path */
2759         return ret;
2760 }
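
/*
 * Editor's note on the allocation size above: one extra cacheline is
 * reserved past the indirect ctx so that combine_wa_ctx() below can
 * append the per-ctx MI_BATCH_BUFFER_START, and the total is rounded
 * up to a whole page for the GEM object.  PAGE_SIZE is a power of two,
 * so roundup() reduces to the mask form (illustrative sketch):
 */
static inline unsigned long __example_shadow_ctx_bytes(unsigned long ctx_size)
{
        return (ctx_size + CACHELINE_BYTES + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}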
2761
2762 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2763 {
2764         uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2765         unsigned char *bb_start_sva;
2766
2767         per_ctx_start[0] = 0x18800001; /* MI_BATCH_BUFFER_START header */
2768         per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; /* batch address */
2769
2770         bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2771                                 wa_ctx->indirect_ctx.size;
2772
2773         memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2774
2775         return 0;
2776 }
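
/*
 * Editor's note: the 0x18800001 header written above is an
 * MI_BATCH_BUFFER_START command; bits 28:23 carry opcode 0x31 and the
 * low bits the DWord length (here 1), with the batch address in the
 * following DWord.  An equivalent spelled-out form:
 */
#define __EXAMPLE_PER_CTX_BB_START	((0x31 << 23) | 1)	/* == 0x18800001 */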
2777
2778 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2779 {
2780         int ret;
2781         struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2782
2783         if (wa_ctx->indirect_ctx.size == 0)
2784                 return 0;
2785
2786         ret = shadow_indirect_ctx(wa_ctx);
2787         if (ret) {
2788                 gvt_vgpu_err("failed to shadow indirect ctx\n");
2789                 return ret;
2790         }
2791
2792         combine_wa_ctx(wa_ctx);
2793
2794         ret = scan_wa_ctx(wa_ctx);
2795         if (ret) {
2796                 gvt_vgpu_err("failed to scan wa ctx\n");
2797                 return ret;
2798         }
2799
2800         return 0;
2801 }
2802
2803 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2804                 unsigned int opcode, int rings)
2805 {
2806         struct cmd_info *info = NULL;
2807         unsigned int ring;
2808         unsigned long rings_mask = rings; /* avoid casting &rings (int) */
2809         for_each_set_bit(ring, &rings_mask, I915_NUM_ENGINES) {
2810                 info = find_cmd_entry(gvt, opcode, ring);
2811                 if (info)
2812                         break;
2813         }
2814         return info;
2815 }
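
/*
 * Editor's sketch: for_each_set_bit() above visits each engine whose
 * bit is set in the mask.  A hand-rolled equivalent, illustrative only:
 */
static inline int __example_first_ring(unsigned long rings_mask)
{
        unsigned int ring;

        for (ring = 0; ring < I915_NUM_ENGINES; ring++)
                if (rings_mask & BIT(ring))
                        return ring; /* lowest engine the opcode is valid on */
        return -1;
}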
2816
2817 static int init_cmd_table(struct intel_gvt *gvt)
2818 {
2819         int i;
2820         struct cmd_entry *e;
2821         struct cmd_info *info;
2822         unsigned int gen_type;
2823
2824         gen_type = intel_gvt_get_device_type(gvt);
2825
2826         for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2827                 if (!(cmd_info[i].devices & gen_type))
2828                         continue;
2829
2830                 e = kzalloc(sizeof(*e), GFP_KERNEL);
2831                 if (!e)
2832                         return -ENOMEM;
2833
2834                 e->info = &cmd_info[i];
2835                 info = find_cmd_entry_any_ring(gvt,
2836                                 e->info->opcode, e->info->rings);
2837                 if (info) {
2838                         gvt_err("%s %s duplicated\n", e->info->name, info->name);
2839                         kfree(e);
2840                         return -EEXIST;
2841                 }
2842
2843                 INIT_HLIST_NODE(&e->hlist);
2844                 add_cmd_entry(gvt, e);
2845                 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2846                                 e->info->name, e->info->opcode, e->info->flag,
2847                                 e->info->devices, e->info->rings);
2848         }
2849         return 0;
2850 }
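
/*
 * Editor's sketch: add_cmd_entry()/find_cmd_entry() (defined earlier in
 * this file) follow the usual <linux/hashtable.h> pattern keyed on the
 * opcode (the real find_cmd_entry() also filters by ring).  A minimal,
 * self-contained illustration with hypothetical names:
 */
struct __example_entry {
        unsigned int opcode;
        struct hlist_node hlist;
};

static DEFINE_HASHTABLE(__example_table, 7);

static inline void __example_add(struct __example_entry *e)
{
        hash_add(__example_table, &e->hlist, e->opcode);
}

static inline struct __example_entry *__example_find(unsigned int opcode)
{
        struct __example_entry *e;

        hash_for_each_possible(__example_table, e, hlist, opcode)
                if (e->opcode == opcode)
                        return e;
        return NULL;
}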
2851
2852 static void clean_cmd_table(struct intel_gvt *gvt)
2853 {
2854         struct hlist_node *tmp;
2855         struct cmd_entry *e;
2856         int i;
2857
2858         hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2859                 kfree(e);
2860
2861         hash_init(gvt->cmd_table);
2862 }
2863
2864 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2865 {
2866         clean_cmd_table(gvt);
2867 }
2868
2869 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2870 {
2871         int ret;
2872
2873         ret = init_cmd_table(gvt);
2874         if (ret) {
2875                 intel_gvt_clean_cmd_parser(gvt);
2876                 return ret;
2877         }
2878         return 0;
2879 }