/* drivers/gpu/drm/nouveau/nva3_pm.c */
1 /*
2  * Copyright 2010 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24
25 #include "drmP.h"
26 #include "nouveau_drv.h"
27 #include "nouveau_bios.h"
28 #include "nouveau_pm.h"
29
30 static u32 read_clk(struct drm_device *, int, bool);
31 static u32 read_pll(struct drm_device *, int, u32);
32
33 static u32
34 read_vco(struct drm_device *dev, int clk)
35 {
36         u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
37         if ((sctl & 0x00000030) != 0x00000030)
38                 return read_pll(dev, 0x41, 0x00e820);
39         return read_pll(dev, 0x42, 0x00e8a0);
40 }
41
42 static u32
43 read_clk(struct drm_device *dev, int clk, bool ignore_en)
44 {
45         struct drm_nouveau_private *dev_priv = dev->dev_private;
46         u32 sctl, sdiv, sclk;
47
48         /* refclk for the 0xe8xx plls is a fixed frequency */
49         if (clk >= 0x40) {
50                 if (dev_priv->chipset == 0xaf) {
51                         /* no joke.. seriously.. sigh.. */
52                         return nv_rd32(dev, 0x00471c) * 1000;
53                 }
54
55                 return dev_priv->crystal;
56         }
57
58         sctl = nv_rd32(dev, 0x4120 + (clk * 4));
59         if (!ignore_en && !(sctl & 0x00000100))
60                 return 0;
61
62         switch (sctl & 0x00003000) {
63         case 0x00000000:
64                 return dev_priv->crystal;
65         case 0x00002000:
66                 if (sctl & 0x00000040)
67                         return 108000;
68                 return 100000;
69         case 0x00003000:
70                 sclk = read_vco(dev, clk);
71                 sdiv = ((sctl & 0x003f0000) >> 16) + 2;
72                 return (sclk * 2) / sdiv;
73         default:
74                 return 0;
75         }
76 }
77
78 static u32
79 read_pll(struct drm_device *dev, int clk, u32 pll)
80 {
81         u32 ctrl = nv_rd32(dev, pll + 0);
82         u32 sclk = 0, P = 1, N = 1, M = 1;
83
84         if (!(ctrl & 0x00000008)) {
85                 if (ctrl & 0x00000001) {
86                         u32 coef = nv_rd32(dev, pll + 4);
87                         M = (coef & 0x000000ff) >> 0;
88                         N = (coef & 0x0000ff00) >> 8;
89                         P = (coef & 0x003f0000) >> 16;
90
91                         /* no post-divider on these.. */
92                         if ((pll & 0x00ff00) == 0x00e800)
93                                 P = 1;
94
95                         sclk = read_clk(dev, 0x00 + clk, false);
96                 }
97         } else {
98                 sclk = read_clk(dev, 0x10 + clk, false);
99         }
100
101         if (M * P)
102                 return sclk * N / (M * P);
103         return 0;
104 }
105
/* Pre-computed programming values for one clock domain:
 * @clk: source-select/divider word for the 0x4120-group register
 * @pll: PLL coefficient word (P<<16 | N<<8 | M), 0 when PLL unused */
struct creg {
	u32 clk;
	u32 pll;
};
110
/* Calculate source/divider (and PLL coefficients, if applicable) to
 * drive clock <clk> at <khz>.
 *
 * @pll: PLL register base for this clock, or 0 if no PLL is attached.
 * @reg: filled with the resulting clk word and PLL coefficient word
 *       (->pll stays 0 when the divider path is chosen).
 *
 * Returns the frequency (kHz) actually achieved, 0 when khz == 0, or
 * a negative errno on failure.
 */
static int
calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
{
	struct pll_lims limits;
	u32 oclk, sclk, sdiv;
	int P, N, M, diff;
	int ret;

	reg->pll = 0;
	reg->clk = 0;
	if (!khz) {
		NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
		return 0;
	}

	/* a few fixed frequencies have dedicated source selects */
	switch (khz) {
	case 27000:
		reg->clk = 0x00000100;
		return khz;
	case 100000:
		reg->clk = 0x00002100;
		return khz;
	case 108000:
		reg->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(dev, clk);
		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
		/* if the clock has a PLL attached, and we can get within
		 * [-2, 3) MHz of a divider, we'll disable the PLL and use
		 * the divider instead.
		 *
		 * divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 */
		if (sdiv > 4) {
			oclk = (sclk * 2) / sdiv;
			diff = khz - oclk;
			if (!pll || (diff >= -2000 && diff < 3000)) {
				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
				return oclk;
			}
		}

		/* no PLL and no divider hit -> can't make this frequency */
		if (!pll) {
			NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
			return -ERANGE;
		}

		break;
	}

	/* fall back to programming the PLL */
	ret = get_pll_limits(dev, pll, &limits);
	if (ret)
		return ret;

	/* refclk comes from the paired source clock; read it even if the
	 * enable bit is clear */
	limits.refclk = read_clk(dev, clk - 0x10, true);
	if (!limits.refclk)
		return -EINVAL;

	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
		reg->pll = (P << 16) | (N << 8) | M;
	}
	return ret;
}
179
/* Program clock <clk> from the pre-computed values in *reg: either
 * reprogram its PLL (reg->pll set) or switch to the divider path and
 * shut the PLL down.  Register bit meanings are undocumented; the
 * ordering below is assumed to be significant -- do not reorder.
 */
static void
prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
{
	const u32 src0 = 0x004120 + (clk * 4);	/* primary source control */
	const u32 src1 = 0x004160 + (clk * 4);	/* alternate source control */
	const u32 ctrl = pll + 0;		/* PLL control */
	const u32 coef = pll + 4;		/* PLL coefficients */

	if (!reg->clk && !reg->pll) {
		NV_DEBUG(dev, "no clock for %02x\n", clk);
		return;
	}

	if (reg->pll) {
		/* run from src0 while the PLL is reprogrammed */
		nv_mask(dev, src0, 0x00000101, 0x00000101);
		nv_wr32(dev, coef, reg->pll);
		nv_mask(dev, ctrl, 0x00000015, 0x00000015);
		nv_mask(dev, ctrl, 0x00000010, 0x00000000);
		/* presumably bit 17 is the PLL lock flag -- TODO confirm */
		nv_wait(dev, ctrl, 0x00020000, 0x00020000);
		nv_mask(dev, ctrl, 0x00000010, 0x00000010);
		/* clear what looks like a bypass bit, then drop src1 */
		nv_mask(dev, ctrl, 0x00000008, 0x00000000);
		nv_mask(dev, src1, 0x00000100, 0x00000000);
		nv_mask(dev, src1, 0x00000001, 0x00000000);
	} else {
		/* divider mode: select src1, then bypass+disable the PLL
		 * and turn off src0 */
		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
		nv_mask(dev, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
		nv_mask(dev, src0, 0x00000100, 0x00000000);
		nv_mask(dev, src0, 0x00000001, 0x00000000);
	}
}
212
213 static void
214 prog_clk(struct drm_device *dev, int clk, struct creg *reg)
215 {
216         if (!reg->clk) {
217                 NV_DEBUG(dev, "no clock for %02x\n", clk);
218                 return;
219         }
220
221         nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
222 }
223
/* Read the current frequency (kHz) of every clock domain into
 * *perflvl.  The copy engine runs off the core clock.  Always
 * returns 0. */
int
nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	perflvl->core   = read_pll(dev, 0x00, 0x4200);
	perflvl->shader = read_pll(dev, 0x01, 0x4220);
	perflvl->memory = read_pll(dev, 0x02, 0x4000);
	perflvl->unka0  = read_clk(dev, 0x20, false);
	perflvl->vdec   = read_clk(dev, 0x21, false);
	perflvl->daemon = read_clk(dev, 0x25, false);
	perflvl->copy   = perflvl->core;
	return 0;
}
236
/* Reclocking state carried from nva3_pm_clocks_pre() to
 * nva3_pm_clocks_set(). */
struct nva3_pm_state {
	struct nouveau_pm_level *perflvl;	/* target performance level */

	struct creg nclk;	/* core clock */
	struct creg sclk;	/* shader clock */
	struct creg vdec;	/* video decoder clock */
	struct creg unka0;	/* unknown clock domain 0x20 */

	struct creg mclk;	/* memory clock */
	u8 *rammap;		/* VBIOS rammap entry, NULL if absent/unusable */
	u8  rammap_ver;
	u8  rammap_len;
	u8 *ramcfg;		/* VBIOS ramcfg entry, NULL if absent/unusable */
	u8  ramcfg_len;
	u32 r004018;		/* extra bits OR'd into reg 0x4018 on reclock */
	u32 r100760;		/* value masked into 0x100760/0x1007a0/0x1007e0 */
};
254
/* Pre-compute everything needed to switch to *perflvl.
 *
 * Returns a kmalloc'd nva3_pm_state (ownership passes to the caller /
 * nva3_pm_clocks_set(), which frees it) or an ERR_PTR on failure.
 */
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nva3_pm_state *info;
	u8 ramcfg_cnt;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* calc_clk() returns the achieved kHz (>= 0) or -errno */
	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
	if (ret < 0)
		goto out;

	/* only rammap/ramcfg version 0x10 tables are understood here */
	info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
					   &info->rammap_ver,
					   &info->rammap_len,
					   &ramcfg_cnt, &info->ramcfg_len);
	if (info->rammap_ver != 0x10 || info->rammap_len < 5)
		info->rammap = NULL;

	/* NOTE(review): this lookup reuses (and overwrites) rammap_ver as
	 * the ramcfg version -- presumably the tables share a version;
	 * verify against nouveau_perf_ramcfg() */
	info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
					   &info->rammap_ver,
					   &info->ramcfg_len);
	if (info->rammap_ver != 0x10)
		info->ramcfg = NULL;

	info->perflvl = perflvl;
out:
	if (ret < 0) {
		kfree(info);
		info = ERR_PTR(ret);
	}
	return info;
}
307
308 static bool
309 nva3_pm_grcp_idle(void *data)
310 {
311         struct drm_device *dev = data;
312
313         if (!(nv_rd32(dev, 0x400304) & 0x00000001))
314                 return true;
315         if (nv_rd32(dev, 0x400308) == 0x0050001c)
316                 return true;
317         return false;
318 }
319
/* nouveau_mem_exec hook: issue a precharge-all command (reg 0x1002d4). */
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
	nv_wr32(exec->dev, 0x1002d4, 0x00000001);
}
325
/* nouveau_mem_exec hook: trigger a manual refresh cycle (reg 0x1002d0). */
static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
	nv_wr32(exec->dev, 0x1002d0, 0x00000001);
}
331
332 static void
333 mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
334 {
335         nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
336 }
337
338 static void
339 mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
340 {
341         nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
342 }
343
/* nouveau_mem_exec hook: delay for at least @nsec nanoseconds.  The
 * dummy read of register 0 presumably flushes posted writes before the
 * delay starts -- TODO confirm.  Rounded up to whole microseconds for
 * udelay(). */
static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
	volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
	udelay((nsec + 500) / 1000);
}
350
351 static u32
352 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
353 {
354         if (mr <= 1)
355                 return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
356         if (mr <= 3)
357                 return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
358         return 0;
359 }
360
/* nouveau_mem_exec hook: write mode register @mr.  MR0/1 live at
 * 0x1002c0+, MR2/3 at 0x1002e0+; when a second rank is present,
 * presumably its shadow copy (+8) must be written first -- the order
 * here mirrors that assumption. */
static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;

	if (mr <= 1) {
		if (dev_priv->vram_rank_B)
			nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
		nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
	} else
	if (mr <= 3) {
		if (dev_priv->vram_rank_B)
			nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
		nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
	}
}
377
/* nouveau_mem_exec hook: switch the memory clock to the pre-computed
 * PLL/divider values while DRAM is quiesced.  Register semantics are
 * undocumented; sequence order is assumed significant. */
static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
	struct drm_device *dev = exec->dev;
	struct nva3_pm_state *info = exec->priv;
	u32 ctrl;

	ctrl = nv_rd32(dev, 0x004000);
	if (!(ctrl & 0x00000008) && info->mclk.pll) {
		/* PLL currently in use: bypass it, program the new
		 * coefficients, then re-enable */
		nv_wr32(dev, 0x004000, (ctrl |=  0x00000008));
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(dev, 0x004018, 0x00001000);
		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
		nv_wr32(dev, 0x004004, info->mclk.pll);
		nv_wr32(dev, 0x004000, (ctrl |=  0x00000001));
		udelay(64);
		nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
		udelay(20);
	} else
	if (!info->mclk.pll) {
		/* divider path: program source select, keep PLL bypassed */
		nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
	}

	/* VBIOS-driven memory tweaks; exact meaning of the rammap/ramcfg
	 * bytes is unknown, values are taken verbatim from the tables */
	if (info->rammap) {
		if (info->ramcfg && (info->rammap[4] & 0x08)) {
			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
				      info->ramcfg[5];
			u32 unk5a4 = ROM16(info->ramcfg[7]);
			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
				     (info->ramcfg[3] & 0x0f) << 16 |
				     (info->ramcfg[9] & 0x0f) |
				     0x80000000;
			nv_wr32(dev, 0x1005a0, unk5a0);
			nv_wr32(dev, 0x1005a4, unk5a4);
			nv_wr32(dev, 0x10f804, unk804);
			nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
		} else {
			nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
			nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
			nv_mask(dev, 0x100760, 0x22222222, info->r100760);
			nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
			nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
		}
	}

	if (info->mclk.pll) {
		/* clear bypass now that the PLL is programmed */
		nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
	}
}
431
/* nouveau_mem_exec hook: write the 9 memory timing registers for the
 * target perflvl, plus assorted ramcfg-driven tweaks whose individual
 * bit meanings are unknown. */
static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
	struct drm_device *dev = exec->dev;
	struct nva3_pm_state *info = exec->priv;
	struct nouveau_pm_level *perflvl = info->perflvl;
	int i;

	/* timing set 0x100220..0x100240 comes straight from the perflvl */
	for (i = 0; i < 9; i++)
		nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);

	if (info->ramcfg) {
		/* ramcfg[2] bit 3 clears 0x100200 bit 12, else sets it */
		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
		nv_mask(dev, 0x100200, 0x00001000, data);
	}

	if (info->ramcfg) {
		u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
		u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
		u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
		if ( (info->ramcfg[2] & 0x20))
			unk714 |= 0xf0000000;
		if (!(info->ramcfg[2] & 0x04))
			unk714 |= 0x00000010;
		nv_wr32(dev, 0x100714, unk714);

		if (info->ramcfg[2] & 0x01)
			unk71c |= 0x00000100;
		nv_wr32(dev, 0x10071c, unk71c);

		if (info->ramcfg[2] & 0x02)
			unk718 |= 0x00000100;
		nv_wr32(dev, 0x100718, unk718);

		if (info->ramcfg[2] & 0x10)
			nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
	}
}
470
471 static void
472 prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
473 {
474         struct nouveau_mem_exec_func exec = {
475                 .dev = dev,
476                 .precharge = mclk_precharge,
477                 .refresh = mclk_refresh,
478                 .refresh_auto = mclk_refresh_auto,
479                 .refresh_self = mclk_refresh_self,
480                 .wait = mclk_wait,
481                 .mrg = mclk_mrg,
482                 .mrs = mclk_mrs,
483                 .clock_set = mclk_clock_set,
484                 .timing_set = mclk_timing_set,
485                 .priv = info
486         };
487         u32 ctrl;
488
489         /* XXX: where the fuck does 750MHz come from? */
490         if (info->perflvl->memory <= 750000) {
491                 info->r004018 = 0x10000000;
492                 info->r100760 = 0x22222222;
493         }
494
495         ctrl = nv_rd32(dev, 0x004000);
496         if (ctrl & 0x00000008) {
497                 if (info->mclk.pll) {
498                         nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
499                         nv_wr32(dev, 0x004004, info->mclk.pll);
500                         nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
501                         nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
502                         nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
503                         nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
504                         nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
505                         nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
506                 }
507         } else {
508                 u32 ssel = 0x00000101;
509                 if (info->mclk.clk)
510                         ssel |= info->mclk.clk;
511                 else
512                         ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
513                 nv_mask(dev, 0x004168, 0x003f3141, ctrl);
514         }
515
516         if (info->ramcfg) {
517                 if (info->ramcfg[2] & 0x10) {
518                         nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
519                 } else {
520                         nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
521                         nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
522                 }
523         }
524         if (info->rammap && !(info->rammap[4] & 0x02))
525                 nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
526         nv_wr32(dev, 0x611200, 0x00003300);
527         if (!(info->ramcfg[2] & 0x10))
528                 nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
529
530         nouveau_mem_exec(&exec, info->perflvl);
531
532         nv_wr32(dev, 0x611200, 0x00003330);
533         if (info->rammap && (info->rammap[4] & 0x02))
534                 nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
535         if (info->ramcfg) {
536                 if (info->ramcfg[2] & 0x10) {
537                         nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
538                         nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
539                 } else {
540                         nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
541                 }
542         }
543
544         if (info->mclk.pll) {
545                 nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
546                 nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
547         } else {
548                 nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
549                 nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
550                 nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
551         }
552 }
553
/* Apply the clock settings pre-computed by nva3_pm_clocks_pre().
 *
 * Blocks context switches and freezes PFIFO around the reclock, then
 * restores both.  Always frees @pre_state.  Returns 0 on success or
 * -EAGAIN if ctxprog/PFIFO would not go idle.
 */
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nva3_pm_state *info = pre_state;
	unsigned long flags;
	int ret = -EAGAIN;

	/* prevent any new grctx switches from starting */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
	/* wait for any pending grctx switches to complete */
	if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
		NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
		goto cleanup;
	}
	/* freeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
		NV_ERROR(dev, "pm: fifo didn't go idle\n");
		goto cleanup;
	}

	/* engines are quiet: reprogram core, shader, misc and (if the
	 * perflvl changes it) memory */
	prog_pll(dev, 0x00, 0x004200, &info->nclk);
	prog_pll(dev, 0x01, 0x004220, &info->sclk);
	prog_clk(dev, 0x20, &info->unka0);
	prog_clk(dev, 0x21, &info->vdec);

	if (info->mclk.clk || info->mclk.pll)
		prog_mem(dev, info);

	ret = 0;

cleanup:
	/* unfreeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
	/* restore ctxprog to normal */
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
	/* unblock it if necessary */
	if (nv_rd32(dev, 0x400308) == 0x0050001c)
		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	kfree(info);
	return ret;
}