/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
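/*
 * RAMHT is the hash table, stored in instance memory, that maps the
 * handles a channel uses to name objects onto the objects' instance
 * addresses and engines.  Each entry is 8 bytes: the handle followed by
 * a context word whose layout differs per card generation.  A software
 * list of entries is kept alongside the hardware table so lookups and
 * removals do not have to parse the table itself.
 */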
static u32
nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_ramht *ramht = chan->ramht;
	u32 hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);

	for (i = 32; i > 0; i -= ramht->bits) {
		hash ^= (handle & ((1 << ramht->bits) - 1));
		handle >>= ramht->bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= chan->id << (ramht->bits - 4);
	/* convert the slot index into a byte offset, 8 bytes per entry */
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}
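/*
 * Probing helpers: an entry is free when its context word is clear
 * (pre-NV40 chips check the VALID bit instead), and on pre-NV50 chips
 * ownership is determined from the channel id encoded in the context
 * word.  NV50+ treats every valid entry as belonging to the channel.
 */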
static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  u32 offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 ctx = nv_ro32(ramht, offset + 4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}

static int
nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
				 struct nouveau_gpuobj *ramht, u32 offset)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	u32 ctx = nv_ro32(ramht, offset + 4);

	if (dev_priv->card_type >= NV_50)
		return true;
	else if (dev_priv->card_type >= NV_40)
		return chan->id ==
			((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
	else
		return chan->id ==
			((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
}
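/*
 * Bind a handle to an object for this channel: record the binding on the
 * software list, build the generation-specific context word, then probe
 * linearly from the hashed slot for a free hardware entry.  Returns
 * -EEXIST if the handle is already bound and -ENOMEM if the probe wraps
 * around without finding space.
 */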
int
nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
		     struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_ramht_entry *entry;
	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
	unsigned long flags;
	u32 ctx, co, ho;

	if (nouveau_ramht_find(chan, handle))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->channel = chan;
	entry->gpuobj = NULL;
	entry->handle = handle;
	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (gpuobj->cinst >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (gpuobj->cinst << 10) | 2;
		} else {
			ctx = (gpuobj->cinst >> 4) |
			      ((gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	spin_lock_irqsave(&chan->ramht->lock, flags);
	list_add(&entry->head, &chan->ramht->entries);

	co = ho = nouveau_ramht_hash_handle(chan, handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, handle, ctx);
			nv_wo32(ramht, co + 0, handle);
			nv_wo32(ramht, co + 4, ctx);

			spin_unlock_irqrestore(&chan->ramht->lock, flags);
			instmem->flush(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(ramht, co));

		co += 8;
		if (co >= ramht->size)
			co = 0;
	} while (co != ho);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	list_del(&entry->head);
	spin_unlock_irqrestore(&chan->ramht->lock, flags);
	kfree(entry);
	return -ENOMEM;
}
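/*
 * Drop the software-list entry for a handle, then probe from its hashed
 * slot for the matching hardware entry (same handle, same channel) and
 * clear it.  Called with ramht->lock held.
 */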
static void
nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
	struct nouveau_ramht_entry *entry, *tmp;
	u32 co, ho;

	list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
		if (entry->channel != chan || entry->handle != handle)
			continue;

		nouveau_gpuobj_ref(NULL, &entry->gpuobj);
		list_del(&entry->head);
		kfree(entry);
		break;
	}

	co = ho = nouveau_ramht_hash_handle(chan, handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    nouveau_ramht_entry_same_channel(chan, ramht, co) &&
		    (handle == nv_ro32(ramht, co))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, handle, nv_ro32(ramht, co + 4));
			nv_wo32(ramht, co + 0, 0x00000000);
			nv_wo32(ramht, co + 4, 0x00000000);
			instmem->flush(dev);
			return;
		}

		co += 8;
		if (co >= ramht->size)
			co = 0;
	} while (co != ho);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, handle);
}
void
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
	struct nouveau_ramht *ramht = chan->ramht;
	unsigned long flags;

	spin_lock_irqsave(&ramht->lock, flags);
	nouveau_ramht_remove_locked(chan, handle);
	spin_unlock_irqrestore(&ramht->lock, flags);
}
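/*
 * Look the handle up on the software list and return the object bound to
 * it for this channel, or NULL if there is no RAMHT or no match.
 */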
struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
{
	struct nouveau_ramht *ramht = chan->ramht;
	struct nouveau_ramht_entry *entry;
	struct nouveau_gpuobj *gpuobj = NULL;
	unsigned long flags;

	if (unlikely(!chan->ramht))
		return NULL;

	spin_lock_irqsave(&ramht->lock, flags);
	list_for_each_entry(entry, &chan->ramht->entries, head) {
		if (entry->channel == chan && entry->handle == handle) {
			gpuobj = entry->gpuobj;
			break;
		}
	}
	spin_unlock_irqrestore(&ramht->lock, flags);

	return gpuobj;
}
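/*
 * Wrap a pre-allocated gpuobj as a reference-counted RAMHT.  The table
 * width in bits is derived from the object's size at 8 bytes per entry.
 */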
int
nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
		  struct nouveau_ramht **pramht)
{
	struct nouveau_ramht *ramht;

	ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
	if (!ramht)
		return -ENOMEM;

	kref_init(&ramht->refcount);
	ramht->bits = drm_order(gpuobj->size / 8);
	INIT_LIST_HEAD(&ramht->entries);
	spin_lock_init(&ramht->lock);
	nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);

	*pramht = ramht;
	return 0;
}
static void
nouveau_ramht_del(struct kref *ref)
{
	struct nouveau_ramht *ramht =
		container_of(ref, struct nouveau_ramht, refcount);

	nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
	kfree(ramht);
}
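/*
 * Point *ptr at 'ref', taking a reference on the new RAMHT and dropping
 * the reference on the old one.  Any entries the channel still has in
 * the old RAMHT are removed before its reference is dropped.
 */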
void
nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
		  struct nouveau_channel *chan)
{
	struct nouveau_ramht_entry *entry, *tmp;
	struct nouveau_ramht *ramht;
	unsigned long flags;

	if (ref)
		kref_get(&ref->refcount);

	ramht = *ptr;
	if (ramht) {
		spin_lock_irqsave(&ramht->lock, flags);
		list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
			if (entry->channel != chan)
				continue;

			nouveau_ramht_remove_locked(chan, entry->handle);
		}
		spin_unlock_irqrestore(&ramht->lock, flags);

		kref_put(&ramht->refcount, nouveau_ramht_del);
	}

	*ptr = ref;
}