/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "priv.h"

#include <subdev/fb.h>
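
/*
 * GK20A is an iGPU with no dedicated VRAM: "video memory" is carved out of
 * DMA-coherent system memory and tracked with the wrapper below.
 */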
struct gk20a_mem {
	struct nouveau_mem base;
	void *cpuaddr;
	dma_addr_t handle;
};
#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
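
/* Free an allocation made by gk20a_ram_get() and clear the caller's pointer;
 * tolerates a NULL *pmem and partially-constructed objects (cpuaddr unset). */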
static void
gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem = to_gk20a_mem(*pmem);

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	if (likely(mem->cpuaddr))
		dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
				  mem->cpuaddr, mem->handle);

	kfree(mem->base.pages);
	kfree(mem);
}
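
/* Allocate a physically-contiguous, power-of-two-aligned buffer from the DMA
 * API and publish its pages through the generic nouveau_mem descriptor. */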
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;
	int i;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
		 align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;

	/* round alignment to the next power of 2, if needed */
	order = fls(align);
	if ((align & (align - 1)) == 0)
		order--;
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;
	mem->base.memtype = type;

	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
					  &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
			&mem->handle, align);

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	/* the allocation is contiguous, so every page follows the first */
	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];

	return 0;
}
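
/* All of system memory is reported as "stolen" VRAM; actual allocations are
 * served by the DMA API through the get/put hooks installed here. */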
static int
gk20a_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 datasize,
	       struct nouveau_object **pobject)
{
	struct nouveau_ram *ram;
	int ret;

	ret = nouveau_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;

	ram->type = NV_MEM_TYPE_STOLEN;
	ram->size = get_num_physpages() << PAGE_SHIFT;

	ram->get = gk20a_ram_get;
	ram->put = gk20a_ram_put;

	return 0;
}
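
/* Only the constructor is GK20A-specific; destruction, init and fini use the
 * generic ram implementations. */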
struct nouveau_oclass
gk20a_ram_oclass = {
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = gk20a_ram_ctor,
		.dtor = _nouveau_ram_dtor,
		.init = _nouveau_ram_init,
		.fini = _nouveau_ram_fini,
	},
};