/* drivers/gpu/drm/i915/i915_gem_stolen.c */
1 /*
2  * Copyright © 2008-2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32
33 /*
34  * The BIOS typically reserves some of the system's memory for the exclusive
35  * use of the integrated graphics. This memory is no longer available for
36  * use by the OS and so the user finds that his system has less memory
37  * available than he put in. We refer to this memory as stolen.
38  *
39  * The BIOS will allocate its framebuffer from the stolen memory. Our
40  * goal is try to reuse that object for our own fbcon which must always
41  * be available for panics. Anything else we can reuse the stolen memory
42  * for is a boon.
43  */
44
45 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
46                                 struct drm_mm_node *node, u64 size,
47                                 unsigned alignment)
48 {
49         int ret;
50
51         if (!drm_mm_initialized(&dev_priv->mm.stolen))
52                 return -ENODEV;
53
54         mutex_lock(&dev_priv->mm.stolen_lock);
55         ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
56                                  DRM_MM_SEARCH_DEFAULT);
57         mutex_unlock(&dev_priv->mm.stolen_lock);
58
59         return ret;
60 }
61
62 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
63                                  struct drm_mm_node *node)
64 {
65         mutex_lock(&dev_priv->mm.stolen_lock);
66         drm_mm_remove_node(node);
67         mutex_unlock(&dev_priv->mm.stolen_lock);
68 }
69
/*
 * Discover the physical base address of the stolen-memory region, trim it
 * if the GTT lives inside it, and claim it as a resource.
 *
 * Returns the (possibly adjusted) physical base, or 0 if the base could not
 * be determined or the region conflicts with an existing reservation. May
 * shrink dev_priv->gtt.stolen_size as a side effect when the GTT overlaps
 * the stolen range.
 */
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		/* Keep only the address bits above 1MiB granularity. */
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		/* Both candidates start out covering the whole stolen range;
		 * stolen[0] will be trimmed at its end and stolen[1] at its
		 * start, giving the usable chunk on either side of the GTT. */
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		/* Extract the physical address of the GTT from PGTBL_CTL;
		 * gen4 splits the address across two bit fields. */
		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		/* 4 bytes per GTT entry. */
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		/* Trim each candidate on the side where the GTT intrudes. */
		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		/* If the chunks differ, something was actually trimmed. */
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}


	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}
178
179 void i915_gem_cleanup_stolen(struct drm_device *dev)
180 {
181         struct drm_i915_private *dev_priv = dev->dev_private;
182
183         if (!drm_mm_initialized(&dev_priv->mm.stolen))
184                 return;
185
186         drm_mm_takedown(&dev_priv->mm.stolen);
187 }
188
189 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
190                                      unsigned long *base, unsigned long *size)
191 {
192         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
193
194         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
195
196         switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
197         case GEN6_STOLEN_RESERVED_1M:
198                 *size = 1024 * 1024;
199                 break;
200         case GEN6_STOLEN_RESERVED_512K:
201                 *size = 512 * 1024;
202                 break;
203         case GEN6_STOLEN_RESERVED_256K:
204                 *size = 256 * 1024;
205                 break;
206         case GEN6_STOLEN_RESERVED_128K:
207                 *size = 128 * 1024;
208                 break;
209         default:
210                 *size = 1024 * 1024;
211                 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
212         }
213 }
214
215 static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
216                                      unsigned long *base, unsigned long *size)
217 {
218         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
219
220         *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
221
222         switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
223         case GEN7_STOLEN_RESERVED_1M:
224                 *size = 1024 * 1024;
225                 break;
226         case GEN7_STOLEN_RESERVED_256K:
227                 *size = 256 * 1024;
228                 break;
229         default:
230                 *size = 1024 * 1024;
231                 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
232         }
233 }
234
235 static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
236                                      unsigned long *base, unsigned long *size)
237 {
238         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
239
240         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
241
242         switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
243         case GEN8_STOLEN_RESERVED_1M:
244                 *size = 1024 * 1024;
245                 break;
246         case GEN8_STOLEN_RESERVED_2M:
247                 *size = 2 * 1024 * 1024;
248                 break;
249         case GEN8_STOLEN_RESERVED_4M:
250                 *size = 4 * 1024 * 1024;
251                 break;
252         case GEN8_STOLEN_RESERVED_8M:
253                 *size = 8 * 1024 * 1024;
254                 break;
255         default:
256                 *size = 8 * 1024 * 1024;
257                 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
258         }
259 }
260
261 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
262                                     unsigned long *base, unsigned long *size)
263 {
264         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
265         unsigned long stolen_top;
266
267         stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
268
269         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
270
271         /* On these platforms, the register doesn't have a size field, so the
272          * size is the distance between the base and the top of the stolen
273          * memory. We also have the genuine case where base is zero and there's
274          * nothing reserved. */
275         if (*base == 0)
276                 *size = 0;
277         else
278                 *size = stolen_top - *base;
279 }
280
/*
 * Set up the stolen-memory range allocator.
 *
 * Locates the physical base of stolen memory, subtracts the hw-reserved
 * portion at its top, and initializes dev_priv->mm.stolen over the usable
 * remainder. Always returns 0: stolen memory is optional, so every failure
 * mode simply leaves the allocator uninitialized.
 */
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long reserved_total, reserved_base, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	/* DMAR and stolen memory do not mix on pre-gen8; bail out early. */
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

	/* Determine the hw-reserved region per generation. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
	case 4:
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	/* Sanity check: the reserved region must lie inside stolen memory. */
	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      dev_priv->gtt.stolen_size >> 10,
		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    reserved_total);

	return 0;
}
361
362 static struct sg_table *
363 i915_pages_create_for_stolen(struct drm_device *dev,
364                              u32 offset, u32 size)
365 {
366         struct drm_i915_private *dev_priv = dev->dev_private;
367         struct sg_table *st;
368         struct scatterlist *sg;
369
370         DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
371         BUG_ON(offset > dev_priv->gtt.stolen_size - size);
372
373         /* We hide that we have no struct page backing our stolen object
374          * by wrapping the contiguous physical allocation with a fake
375          * dma mapping in a single scatterlist.
376          */
377
378         st = kmalloc(sizeof(*st), GFP_KERNEL);
379         if (st == NULL)
380                 return NULL;
381
382         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
383                 kfree(st);
384                 return NULL;
385         }
386
387         sg = st->sgl;
388         sg->offset = 0;
389         sg->length = size;
390
391         sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
392         sg_dma_len(sg) = size;
393
394         return st;
395 }
396
/* Stolen objects are created with obj->pages already faked up by
 * i915_pages_create_for_stolen(), so this hook must never be reached. */
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}
402
403 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
404 {
405         /* Should only be called during free */
406         sg_free_table(obj->pages);
407         kfree(obj->pages);
408 }
409
410
411 static void
412 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
413 {
414         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
415
416         if (obj->stolen) {
417                 i915_gem_stolen_remove_node(dev_priv, obj->stolen);
418                 kfree(obj->stolen);
419                 obj->stolen = NULL;
420         }
421 }
/* Object ops for stolen-backed objects: pages are faked at creation time
 * (get_pages BUGs if reached) and the stolen node is released with the
 * object. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
427
428 static struct drm_i915_gem_object *
429 _i915_gem_object_create_stolen(struct drm_device *dev,
430                                struct drm_mm_node *stolen)
431 {
432         struct drm_i915_gem_object *obj;
433
434         obj = i915_gem_object_alloc(dev);
435         if (obj == NULL)
436                 return NULL;
437
438         drm_gem_private_object_init(dev, &obj->base, stolen->size);
439         i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
440
441         obj->pages = i915_pages_create_for_stolen(dev,
442                                                   stolen->start, stolen->size);
443         if (obj->pages == NULL)
444                 goto cleanup;
445
446         i915_gem_object_pin_pages(obj);
447         obj->stolen = stolen;
448
449         obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
450         obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
451
452         return obj;
453
454 cleanup:
455         i915_gem_object_free(obj);
456         return NULL;
457 }
458
459 struct drm_i915_gem_object *
460 i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
461 {
462         struct drm_i915_private *dev_priv = dev->dev_private;
463         struct drm_i915_gem_object *obj;
464         struct drm_mm_node *stolen;
465         int ret;
466
467         if (!drm_mm_initialized(&dev_priv->mm.stolen))
468                 return NULL;
469
470         DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
471         if (size == 0)
472                 return NULL;
473
474         stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
475         if (!stolen)
476                 return NULL;
477
478         ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
479         if (ret) {
480                 kfree(stolen);
481                 return NULL;
482         }
483
484         obj = _i915_gem_object_create_stolen(dev, stolen);
485         if (obj)
486                 return obj;
487
488         i915_gem_stolen_remove_node(dev_priv, stolen);
489         kfree(stolen);
490         return NULL;
491 }
492
493 struct drm_i915_gem_object *
494 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
495                                                u32 stolen_offset,
496                                                u32 gtt_offset,
497                                                u32 size)
498 {
499         struct drm_i915_private *dev_priv = dev->dev_private;
500         struct i915_address_space *ggtt = &dev_priv->gtt.base;
501         struct drm_i915_gem_object *obj;
502         struct drm_mm_node *stolen;
503         struct i915_vma *vma;
504         int ret;
505
506         if (!drm_mm_initialized(&dev_priv->mm.stolen))
507                 return NULL;
508
509         DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
510                         stolen_offset, gtt_offset, size);
511
512         /* KISS and expect everything to be page-aligned */
513         if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
514             WARN_ON(stolen_offset & 4095))
515                 return NULL;
516
517         stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
518         if (!stolen)
519                 return NULL;
520
521         stolen->start = stolen_offset;
522         stolen->size = size;
523         mutex_lock(&dev_priv->mm.stolen_lock);
524         ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
525         mutex_unlock(&dev_priv->mm.stolen_lock);
526         if (ret) {
527                 DRM_DEBUG_KMS("failed to allocate stolen space\n");
528                 kfree(stolen);
529                 return NULL;
530         }
531
532         obj = _i915_gem_object_create_stolen(dev, stolen);
533         if (obj == NULL) {
534                 DRM_DEBUG_KMS("failed to allocate stolen object\n");
535                 i915_gem_stolen_remove_node(dev_priv, stolen);
536                 kfree(stolen);
537                 return NULL;
538         }
539
540         /* Some objects just need physical mem from stolen space */
541         if (gtt_offset == I915_GTT_OFFSET_NONE)
542                 return obj;
543
544         vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
545         if (IS_ERR(vma)) {
546                 ret = PTR_ERR(vma);
547                 goto err_out;
548         }
549
550         /* To simplify the initialisation sequence between KMS and GTT,
551          * we allow construction of the stolen object prior to
552          * setting up the GTT space. The actual reservation will occur
553          * later.
554          */
555         vma->node.start = gtt_offset;
556         vma->node.size = size;
557         if (drm_mm_initialized(&ggtt->mm)) {
558                 ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
559                 if (ret) {
560                         DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
561                         goto err_vma;
562                 }
563         }
564
565         vma->bound |= GLOBAL_BIND;
566
567         list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
568         list_add_tail(&vma->mm_list, &ggtt->inactive_list);
569         i915_gem_object_pin_pages(obj);
570
571         return obj;
572
573 err_vma:
574         i915_gem_vma_destroy(vma);
575 err_out:
576         i915_gem_stolen_remove_node(dev_priv, stolen);
577         kfree(stolen);
578         drm_gem_object_unreference(&obj->base);
579         return NULL;
580 }