/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "nouveau_drv.h"
27 #include "nouveau_mm.h"
30 region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
32 list_del(&a->nl_entry);
33 list_del(&a->fl_entry);
37 static struct nouveau_mm_node *
38 region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
40 struct nouveau_mm_node *b;
42 if (a->length == size)
45 b = kmalloc(sizeof(*b), GFP_KERNEL);
46 if (unlikely(b == NULL))
49 b->offset = a->offset;
55 list_add_tail(&b->nl_entry, &a->nl_entry);
57 list_add_tail(&b->fl_entry, &a->fl_entry);
61 static struct nouveau_mm_node *
62 nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
64 struct nouveau_mm_node *prev, *next;
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
90 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
95 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this);
98 /* any entirely free blocks now? we'll want to remove typing
99 * on them now so they can be use for any memory allocation
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
105 /* split off any still-typed region at the start */
106 if (block_s != this->offset) {
107 if (!region_split(rmm, this, block_s - this->offset))
111 /* split off the soon-to-be-untyped block(s) */
112 block_l = rounddown(this->length, rmm->block_size);
113 if (block_l != this->length) {
114 this = region_split(rmm, this, block_l);
119 /* mark as having no type, and retry merge with any adjacent
123 nouveau_mm_merge(rmm, this);
127 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode)
130 struct nouveau_mm_node *this, *tmp, *next;
131 u32 splitoff, avail, alloc;
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
135 if (this->nl_entry.next == &rmm->nodes)
138 /* skip wrongly typed blocks */
139 if (this->type && this->type != type)
142 /* account for alignment */
143 splitoff = this->offset & (align - 1);
145 splitoff = align - splitoff;
147 if (this->length <= splitoff)
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
159 /* determine allocation size */
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
179 this = region_split(rmm, this, block);
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
191 amount = roundup(amount, rmm->block_size);
193 next = region_split(rmm, next, amount);
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
211 if (!region_split(rmm, this, splitoff))
215 this = region_split(rmm, this, alloc);
220 list_del(&this->fl_entry);
229 nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
231 struct nouveau_mm *rmm;
232 struct nouveau_mm_node *heap;
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
238 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset;
241 rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
246 rmm->block_size = block;
247 mutex_init(&rmm->mutex);
248 INIT_LIST_HEAD(&rmm->nodes);
249 INIT_LIST_HEAD(&rmm->free);
250 list_add(&heap->nl_entry, &rmm->nodes);
251 list_add(&heap->fl_entry, &rmm->free);
258 nouveau_mm_fini(struct nouveau_mm **prmm)
260 struct nouveau_mm *rmm = *prmm;
261 struct nouveau_mm_node *heap =
262 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
264 if (!list_is_singular(&rmm->nodes))