/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, and so the user finds that their system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pdev = dev_priv->bridge_dev;
        u32 base;

        /* On the machines I have tested the Graphics Base of Stolen Memory
         * is unreliable, so on those compute the base by subtracting the
         * stolen memory from the Top of Low Usable DRAM which is where the
         * BIOS places the graphics stolen memory.
         *
         * On gen2, the layout is slightly different with the Graphics Segment
         * immediately following Top of Memory (or Top of Usable DRAM). Note
         * it appears that TOUD is only reported by 865g, so we just use the
         * top of memory as determined by the e820 probe.
         *
         * XXX gen2 requires an unavailable symbol and 945gm fails with
         * its value of TOLUD.
         */
        base = 0;
        if (IS_VALLEYVIEW(dev)) {
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else if (INTEL_INFO(dev)->gen >= 6) {
                /* Read Base Data of Stolen Memory Register (BDSM) directly.
                 * Note that there is also a MCHBAR mirror at 0x1080c0 or
                 * we could use device 2:0x5c instead.
                 */
                pci_read_config_dword(pdev, 0xB0, &base);
                base &= ~4095; /* lower bits used for locking register */
        } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(pdev, 0xA4, &base);
#if 0
        } else if (IS_GEN3(dev)) {
                u8 val;
                /* Stolen is immediately below Top of Low Usable DRAM */
                pci_read_config_byte(pdev, 0x9c, &val);
                base = val >> 3 << 27;
                base -= dev_priv->mm.gtt->stolen_size;
        } else {
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        return base;
}

static int i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);

        /* Try to over-allocate to reduce reallocations and fragmentation */
        compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
                                           size <<= 1, 4096, 0);
        if (!compressed_fb)
                compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
                                                   size >>= 1, 4096, 0);
        if (compressed_fb)
                compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
        if (!compressed_fb)
                goto err;

        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        } else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
                                                    4096, 4096, 0);
                if (compressed_llb)
                        compressed_llb = drm_mm_get_block(compressed_llb,
                                                          4096, 4096);
                if (!compressed_llb)
                        goto err_fb;

                dev_priv->compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->compressed_fb = compressed_fb;
        dev_priv->cfb_size = size;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        drm_mm_put_block(compressed_fb);
err:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}

int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->mm.stolen_base == 0)
                return -ENODEV;

        if (size < dev_priv->cfb_size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size);
}

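/*
 * Illustrative sketch (not part of this file): roughly how an FBC enable
 * path might reserve a compressed buffer from stolen memory and simply run
 * without compression when the reservation fails. The function name and the
 * fb_size parameter below are hypothetical stand-ins for the real caller.
 */
#if 0
static void example_enable_fbc(struct drm_device *dev, int fb_size)
{
        /* Ask for a compressed buffer at least as large as the framebuffer */
        if (i915_gem_stolen_setup_compression(dev, fb_size)) {
                DRM_DEBUG_KMS("stolen reservation failed, FBC stays off\n");
                return;
        }

        /* ... program the FBC hardware using dev_priv->compressed_fb ... */
}
#endif
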
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->cfb_size == 0)
                return;

        if (dev_priv->compressed_fb)
                drm_mm_put_block(dev_priv->compressed_fb);

        if (dev_priv->compressed_llb)
                drm_mm_put_block(dev_priv->compressed_llb);

        dev_priv->cfb_size = 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}

int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int bios_reserved = 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = offset;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}

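/*
 * Illustrative sketch (not part of this file): because the table built above
 * has a single entry whose dma address already points into the stolen range,
 * a consumer can walk it with the ordinary scatterlist iterators and never
 * notice the absence of struct page backing. The function name below is a
 * hypothetical example.
 */
#if 0
static void example_dump_stolen_sg(struct sg_table *st)
{
        struct scatterlist *sg;
        int i;

        /* With the single fake entry, this loop executes exactly once */
        for_each_sg(st->sgl, sg, st->nents, i)
                DRM_DEBUG_DRIVER("dma 0x%llx, len %u\n",
                                 (unsigned long long)sg_dma_address(sg),
                                 sg_dma_len(sg));
}
#endif
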
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
                goto cleanup;

        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.write_domain = I915_GEM_DOMAIN_GTT;
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->cache_level = I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;

        if (dev_priv->mm.stolen_base == 0)
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
        if (stolen)
                stolen = drm_mm_get_block(stolen, size, 4096);
        if (stolen == NULL)
                return NULL;

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_put_block(stolen);
        return NULL;
}

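/*
 * Illustrative sketch (not part of this file): how a caller might
 * opportunistically back an allocation with stolen memory and fall back to a
 * regular shmem-backed object when no stolen space is left. The fallback via
 * i915_gem_alloc_object() mirrors how other parts of the driver allocate
 * ordinary objects; the wrapper name below is a hypothetical example.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc_prefer_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_gem_object *obj;

        /* Stolen space is contiguous and otherwise unused, so try it first */
        obj = i915_gem_object_create_stolen(dev, size);
        if (obj)
                return obj;

        /* Otherwise fall back to an ordinary GEM object */
        return i915_gem_alloc_object(dev, size);
}
#endif
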
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;

        if (dev_priv->mm.stolen_base == 0)
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                        stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = drm_mm_create_block(&dev_priv->mm.stolen,
                                     stolen_offset, size,
                                     false);
        if (stolen == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_put_block(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == -1)
                return obj;

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
                obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
                                                     gtt_offset, size,
                                                     false);
                if (obj->gtt_space == NULL) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        drm_gem_object_unreference(&obj->base);
                        return NULL;
                }
        } else {
                obj->gtt_space = I915_GTT_RESERVED;
        }

        obj->gtt_offset = gtt_offset;
        obj->has_global_gtt_mapping = 1;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

        return obj;
}

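/*
 * Illustrative sketch (not part of this file): roughly how display takeover
 * code could wrap an already-programmed BIOS framebuffer, whose stolen and
 * GTT offsets were read back from the hardware, in a GEM object so that fbcon
 * can keep scanning out of it. The function name and parameters below are
 * hypothetical stand-ins for the real caller.
 */
#if 0
static struct drm_i915_gem_object *
example_wrap_bios_fb(struct drm_device *dev, u32 stolen_offset,
                     u32 gtt_offset, u32 fb_size)
{
        /* The plane is already scanning out of this range; just claim it */
        return i915_gem_object_create_stolen_for_preallocated(dev,
                                                              stolen_offset,
                                                              gtt_offset,
                                                              ALIGN(fb_size, 4096));
}
#endif
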
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_put_block(obj->stolen);
                obj->stolen = NULL;
        }
}