/* drivers/gpu/drm/nouveau/nv10_fence.c */
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs <bskeggs@redhat.com>
23  */
24
25 #include "drmP.h"
26 #include "nouveau_drv.h"
27 #include "nouveau_dma.h"
28 #include "nouveau_ramht.h"
29 #include "nouveau_fence.h"
30
/* Per-channel fence context; currently nothing beyond the common base. */
struct nv10_fence_chan {
	struct nouveau_fence_chan base;
};
34
/* Per-device fence engine state. */
struct nv10_fence_priv {
	struct nouveau_fence_priv base;
	struct nouveau_bo *bo;	/* semaphore buffer, allocated on chipset >= 0x17 only */
	spinlock_t lock;	/* protects sequence */
	u32 sequence;		/* next free semaphore value pair (bumped by 2 per sync) */
};
41
42 static int
43 nv10_fence_emit(struct nouveau_fence *fence)
44 {
45         struct nouveau_channel *chan = fence->channel;
46         int ret = RING_SPACE(chan, 2);
47         if (ret == 0) {
48                 BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
49                 OUT_RING  (chan, fence->sequence);
50                 FIRE_RING (chan);
51         }
52         return ret;
53 }
54
55
/* Inter-channel sync for pre-NV17 hardware: no semaphore mechanism is
 * available here, so cross-channel synchronisation is reported as
 * unsupported and the caller must fall back (e.g. to CPU waits).
 */
static int
nv10_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        return -ENODEV;
}
62
63 static int
64 nv17_fence_sync(struct nouveau_fence *fence,
65                 struct nouveau_channel *prev, struct nouveau_channel *chan)
66 {
67         struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
68         u32 value;
69         int ret;
70
71         if (!mutex_trylock(&prev->mutex))
72                 return -EBUSY;
73
74         spin_lock(&priv->lock);
75         value = priv->sequence;
76         priv->sequence += 2;
77         spin_unlock(&priv->lock);
78
79         ret = RING_SPACE(prev, 5);
80         if (!ret) {
81                 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
82                 OUT_RING  (prev, NvSema);
83                 OUT_RING  (prev, 0);
84                 OUT_RING  (prev, value + 0);
85                 OUT_RING  (prev, value + 1);
86                 FIRE_RING (prev);
87         }
88
89         if (!ret && !(ret = RING_SPACE(chan, 5))) {
90                 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
91                 OUT_RING  (chan, NvSema);
92                 OUT_RING  (chan, 0);
93                 OUT_RING  (chan, value + 1);
94                 OUT_RING  (chan, value + 2);
95                 FIRE_RING (chan);
96         }
97
98         mutex_unlock(&prev->mutex);
99         return 0;
100 }
101
/* Read back the channel's last completed fence sequence from per-channel
 * register 0x48 — presumably the reference counter that the
 * NV10_SUBCHAN_REF_CNT method in nv10_fence_emit() writes to (TODO:
 * confirm against hardware docs).
 */
static u32
nv10_fence_read(struct nouveau_channel *chan)
{
        return nvchan_rd32(chan, 0x0048);
}
107
108 static void
109 nv10_fence_context_del(struct nouveau_channel *chan, int engine)
110 {
111         struct nv10_fence_chan *fctx = chan->engctx[engine];
112         nouveau_fence_context_del(&fctx->base);
113         chan->engctx[engine] = NULL;
114         kfree(fctx);
115 }
116
117 static int
118 nv10_fence_context_new(struct nouveau_channel *chan, int engine)
119 {
120         struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
121         struct nv10_fence_chan *fctx;
122         struct nouveau_gpuobj *obj;
123         int ret = 0;
124
125         fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
126         if (!fctx)
127                 return -ENOMEM;
128
129         nouveau_fence_context_new(&fctx->base);
130
131         if (priv->bo) {
132                 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
133
134                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
135                                              mem->start * PAGE_SIZE, mem->size,
136                                              NV_MEM_ACCESS_RW,
137                                              NV_MEM_TARGET_VRAM, &obj);
138                 if (!ret) {
139                         ret = nouveau_ramht_insert(chan, NvSema, obj);
140                         nouveau_gpuobj_ref(NULL, &obj);
141                 }
142         }
143
144         if (ret)
145                 nv10_fence_context_del(chan, engine);
146         return ret;
147 }
148
/* Engine fini hook: no per-device fence state needs tearing down on
 * shutdown/suspend, so this is a no-op.
 */
static int
nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
{
        return 0;
}
154
/* Engine init hook: nothing to program at startup/resume, so a no-op. */
static int
nv10_fence_init(struct drm_device *dev, int engine)
{
        return 0;
}
160
161 static void
162 nv10_fence_destroy(struct drm_device *dev, int engine)
163 {
164         struct drm_nouveau_private *dev_priv = dev->dev_private;
165         struct nv10_fence_priv *priv = nv_engine(dev, engine);
166
167         nouveau_bo_ref(NULL, &priv->bo);
168         dev_priv->eng[engine] = NULL;
169         kfree(priv);
170 }
171
172 int
173 nv10_fence_create(struct drm_device *dev)
174 {
175         struct drm_nouveau_private *dev_priv = dev->dev_private;
176         struct nv10_fence_priv *priv;
177         int ret = 0;
178
179         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
180         if (!priv)
181                 return -ENOMEM;
182
183         priv->base.engine.destroy = nv10_fence_destroy;
184         priv->base.engine.init = nv10_fence_init;
185         priv->base.engine.fini = nv10_fence_fini;
186         priv->base.engine.context_new = nv10_fence_context_new;
187         priv->base.engine.context_del = nv10_fence_context_del;
188         priv->base.emit = nv10_fence_emit;
189         priv->base.read = nv10_fence_read;
190         priv->base.sync = nv10_fence_sync;
191         dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
192         spin_lock_init(&priv->lock);
193
194         if (dev_priv->chipset >= 0x17) {
195                 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
196                                      0, 0x0000, NULL, &priv->bo);
197                 if (!ret) {
198                         ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
199                         if (!ret)
200                                 ret = nouveau_bo_map(priv->bo);
201                         if (ret)
202                                 nouveau_bo_ref(NULL, &priv->bo);
203                 }
204
205                 if (ret == 0) {
206                         nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
207                         priv->base.sync = nv17_fence_sync;
208                 }
209         }
210
211         if (ret)
212                 nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
213         return ret;
214 }