/* drivers/gpu/drm/i915/i915_gem_stolen.c */
/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that the system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

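/* Locate the physical base address of the stolen region and reserve it in
 * the iomem resource tree so no other driver can claim the range. Returns 0
 * if the base cannot be determined or the region is already in use.
 */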
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR. On gen2, the layout is again slightly
         * different with the Graphics Segment immediately following Top of
         * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
         * reported by 865g, so we just use the top of memory as determined
         * by the e820 probe.
         *
         * XXX However, the gen2 path below depends on max_low_pfn_mapped,
         * a symbol that is not available to the driver, so it remains
         * disabled.
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                          base, base + (uint32_t)dev_priv->gtt.stolen_size);
                base = 0;
        }

        return base;
}

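/* Carve the buffers needed for framebuffer compression out of stolen memory:
 * the compressed framebuffer itself and, on platforms without a PCH and
 * older than GM45, a separate compressed line-length buffer. The relevant
 * hardware base registers are then programmed with the chosen offsets.
 */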
static int i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
        int ret;

        compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
        if (!compressed_fb)
                goto err_llb;

        /* Try to over-allocate to reduce reallocations and fragmentation */
        ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
                                 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret)
                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
                                         size >>= 1, 4096,
                                         DRM_MM_SEARCH_DEFAULT);
        if (ret)
                goto err_llb;

        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        } else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
                                         4096, 4096, DRM_MM_SEARCH_DEFAULT);
                if (ret)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.compressed_fb = compressed_fb;
        dev_priv->fbc.size = size;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        kfree(compressed_llb);
        drm_mm_remove_node(compressed_fb);
err_llb:
        kfree(compressed_fb);
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

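/* Entry point used by the FBC code. If the requested size is smaller than
 * the current allocation, the existing buffers are reused; otherwise they
 * are released and a new set is carved out of stolen memory.
 */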
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        if (size < dev_priv->fbc.size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size);
}

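/* Return the compressed framebuffer and, if present, the compressed
 * line-length buffer to the stolen allocator.
 */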
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->fbc.size == 0)
                return;

        if (dev_priv->fbc.compressed_fb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_fb);
                kfree(dev_priv->fbc.compressed_fb);
        }

        if (dev_priv->fbc.compressed_llb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_llb);
                kfree(dev_priv->fbc.compressed_llb);
        }

        dev_priv->fbc.size = 0;
}

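/* Tear down the stolen-memory allocator on driver unload. The compression
 * buffers must be released first since they live inside it.
 */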
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}

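/* Locate the stolen region and initialise a drm_mm range allocator over it,
 * excluding any portion at the top that the BIOS keeps for itself (the top
 * 1MiB on Valleyview). Returning 0 without setting up the allocator simply
 * means stolen memory will not be used.
 */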
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int bios_reserved = 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

        if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
                return 0;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}

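/* Build a single-entry sg_table whose DMA address points directly at the
 * requested range of stolen memory; this is the only backing a stolen
 * object ever has.
 */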
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = offset;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

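/* The backing pages are created (and pinned) together with the object in
 * _i915_gem_object_create_stolen(), so the get_pages hook must never be
 * reached for a stolen object.
 */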
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
};

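/* Wrap an already-reserved stolen range in a GEM object. The fake sg table
 * is created up front and the pages are pinned for the object's lifetime.
 */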
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.write_domain = I915_GEM_DOMAIN_GTT;
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->cache_level = I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

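/* Allocate a fresh, 4KiB-aligned range from the stolen allocator and wrap
 * it in a GEM object. Returns NULL if stolen memory is unavailable or the
 * allocation does not fit.
 */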
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
                                 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_remove_node(stolen);
        kfree(stolen);
        return NULL;
}

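/* Create a GEM object for a range of stolen memory that is already in use,
 * such as the framebuffer the BIOS set up and which we inherit for fbcon.
 * The exact stolen range is reserved and, unless gtt_offset is
 * I915_GTT_OFFSET_NONE, the matching global GTT range is reserved as well.
 */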
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                        stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_remove_node(stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_vma_create(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        i915_gem_vma_destroy(vma);
                        goto err_out;
                }
        }

        obj->has_global_gtt_mapping = 1;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);

        return obj;

err_out:
        drm_mm_remove_node(stolen);
        kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
}

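/* Called from the object free path: hand the object's range back to the
 * stolen allocator.
 */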
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_remove_node(obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}