}
struct nvkm_instmem {
- struct nvkm_subdev base;
+ struct nvkm_subdev subdev;
struct list_head list;
u32 reserved;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_instmem *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_instmem *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_instmem *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_instmem *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
if (ret)
return ret;
- mutex_lock(&imem->base.mutex);
+ mutex_lock(&imem->subdev.mutex);
list_add(&iobj->head, &imem->list);
- mutex_unlock(&imem->base.mutex);
+ mutex_unlock(&imem->subdev.mutex);
return 0;
}
nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
u32 size, u32 align, struct nvkm_object **pobject)
{
- struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
+ struct nvkm_instmem_impl *impl = (void *)imem->subdev.object.oclass;
struct nvkm_instobj_args args = { .size = size, .align = align };
return nvkm_object_ctor(parent, &parent->engine->subdev.object,
impl->instobj, &args, sizeof(args), pobject);
int i, ret = 0;
if (suspend) {
- mutex_lock(&imem->base.mutex);
+ mutex_lock(&imem->subdev.mutex);
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
if (!iobj->suspend) {
ret = -ENOMEM;
break;
}
for (i = 0; i < iobj->size; i += 4)
iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
- mutex_unlock(&imem->base.mutex);
+ mutex_unlock(&imem->subdev.mutex);
if (ret)
return ret;
}
- return nvkm_subdev_fini(&imem->base, suspend);
+ return nvkm_subdev_fini(&imem->subdev, suspend);
}
int
struct nvkm_instobj *iobj;
int ret, i;
- ret = nvkm_subdev_init(&imem->base);
+ ret = nvkm_subdev_init(&imem->subdev);
if (ret)
return ret;
- mutex_lock(&imem->base.mutex);
+ mutex_lock(&imem->subdev.mutex);
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
nv_wo32(iobj, i, iobj->suspend[i / 4]);
vfree(iobj->suspend);
iobj->suspend = NULL;
}
}
- mutex_unlock(&imem->base.mutex);
+ mutex_unlock(&imem->subdev.mutex);
return 0;
}
#include "priv.h"
-struct gk20a_instobj_priv {
+struct gk20a_instobj {
struct nvkm_instobj base;
/* Must be second member here - see nouveau_gpuobj_map_vm() */
struct nvkm_mem *mem;
* Used for objects allocated using the DMA API
*/
struct gk20a_instobj_dma {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
void *cpuaddr;
dma_addr_t handle;
* Used for objects flattened using the IOMMU API
*/
struct gk20a_instobj_iommu {
- struct gk20a_instobj_priv base;
+ struct gk20a_instobj base;
/* array of base.mem->size pages */
struct page *pages[];
};
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
struct nvkm_instmem base;
spinlock_t lock;
u64 addr;
static u32
gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
+ struct gk20a_instobj *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
u32 data;
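/* Split the target into a 1MiB-aligned window base and a 20-bit offset:
 * 0x001700 moves the PRAMIN window at 0x700000 onto that base, and
 * imem->addr caches the last base so the window is only reprogrammed
 * when a different 1MiB region is accessed. */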
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ spin_lock_irqsave(&imem->lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nv_wr32(imem, 0x001700, base >> 16);
+ imem->addr = base;
}
- data = nv_rd32(priv, 0x700000 + addr);
- spin_unlock_irqrestore(&priv->lock, flags);
+ data = nv_rd32(imem, 0x700000 + addr);
+ spin_unlock_irqrestore(&imem->lock, flags);
return data;
}
static void
gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
{
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct gk20a_instobj_priv *node = (void *)object;
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
+ struct gk20a_instobj *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ spin_lock_irqsave(&imem->lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nv_wr32(imem, 0x001700, base >> 16);
+ imem->addr = base;
}
- nv_wr32(priv, 0x700000 + addr, data);
- spin_unlock_irqrestore(&priv->lock, flags);
+ nv_wr32(imem, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&imem->lock, flags);
}
static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
{
struct gk20a_instobj_dma *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
- struct device *dev = nv_device_base(nv_device(priv));
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
+ struct device *dev = nv_device_base(nv_device(imem));
if (unlikely(!node->cpuaddr))
return;
dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
- node->handle, &priv->attrs);
+ node->handle, &imem->attrs);
}
static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
{
struct gk20a_instobj_iommu *node = (void *)_node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
struct nvkm_mm_node *r;
int i;
r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
rl_entry);
/* clear bit 34 to unmap pages */
- r->offset &= ~BIT(34 - priv->iommu_pgshift);
+ r->offset &= ~BIT(34 - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
for (i = 0; i < _node->mem->size; i++) {
- iommu_unmap(priv->domain,
- (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+ iommu_unmap(imem->domain,
+ (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
__free_page(node->pages[i]);
}
/* Release area from GPU address space */
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
}
static void
gk20a_instobj_dtor(struct nvkm_object *object)
{
- struct gk20a_instobj_priv *node = (void *)object;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+ struct gk20a_instobj *node = (void *)object;
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(node);
- if (priv->domain)
+ if (imem->domain)
gk20a_instobj_dtor_iommu(node);
else
gk20a_instobj_dtor_dma(node);
static int
gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_dma *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
struct device *dev = nv_device_base(nv_device(parent));
int ret;
node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &priv->attrs);
+ &imem->attrs);
if (!node->cpuaddr) {
- nv_error(priv, "cannot allocate DMA memory\n");
+ nv_error(imem, "cannot allocate DMA memory\n");
return -ENOMEM;
}
/* alignment check */
if (unlikely(node->handle & (align - 1)))
- nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
+ nv_warn(imem, "memory not aligned as requested: %pad (0x%x)\n",
&node->handle, align);
/* present memory for being mapped using small pages */
static int
gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, u32 npages, u32 align,
- struct gk20a_instobj_priv **_node)
+ struct gk20a_instobj **_node)
{
struct gk20a_instobj_iommu *node;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
struct nvkm_mm_node *r;
int ret;
int i;
node->pages[i] = p;
}
- mutex_lock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
/* Reserve area from GPU address space */
- ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
- align >> priv->iommu_pgshift, &r);
- mutex_unlock(priv->mm_mutex);
+ ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+ align >> imem->iommu_pgshift, &r);
+ mutex_unlock(imem->mm_mutex);
if (ret) {
- nv_error(priv, "virtual space is full!\n");
+ nv_error(imem, "virtual space is full!\n");
goto free_pages;
}
/* Map into GPU address space */
for (i = 0; i < npages; i++) {
struct page *p = node->pages[i];
- u32 offset = (r->offset + i) << priv->iommu_pgshift;
+ u32 offset = (r->offset + i) << imem->iommu_pgshift;
- ret = iommu_map(priv->domain, offset, page_to_phys(p),
+ ret = iommu_map(imem->domain, offset, page_to_phys(p),
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
if (ret < 0) {
- nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+ nv_error(imem, "IOMMU mapping failure: %d\n", ret);
while (i-- > 0) {
offset -= PAGE_SIZE;
- iommu_unmap(priv->domain, offset, PAGE_SIZE);
+ iommu_unmap(imem->domain, offset, PAGE_SIZE);
}
goto release_area;
}
}
/* Bit 34 tells that an address is to be resolved through the IOMMU */
- r->offset |= BIT(34 - priv->iommu_pgshift);
+ r->offset |= BIT(34 - imem->iommu_pgshift);
- node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+ node->base._mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
INIT_LIST_HEAD(&node->base._mem.regions);
list_add_tail(&r->rl_entry, &node->base._mem.regions);
return 0;
release_area:
- mutex_lock(priv->mm_mutex);
- nvkm_mm_free(priv->mm, &r);
- mutex_unlock(priv->mm_mutex);
+ mutex_lock(imem->mm_mutex);
+ nvkm_mm_free(imem->mm, &r);
+ mutex_unlock(imem->mm_mutex);
free_pages:
for (i = 0; i < npages && node->pages[i] != NULL; i++)
__free_page(node->pages[i]);
struct nvkm_object **pobject)
{
struct nvkm_instobj_args *args = data;
- struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct gk20a_instobj_priv *node;
+ struct gk20a_instmem *imem = (void *)nvkm_instmem(parent);
+ struct gk20a_instobj *node;
u32 size, align;
int ret;
nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
- priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+ imem->domain ? "IOMMU" : "DMA", args->size, args->align);
/* Round size and align to page bounds */
size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
- if (priv->domain)
+ if (imem->domain)
ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
size >> PAGE_SHIFT, align, &node);
else
static int
gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
{
- struct gk20a_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nvkm_instmem_fini(&priv->base, suspend);
+ struct gk20a_instmem *imem = (void *)object;
+ imem->addr = ~0ULL;
+ return nvkm_instmem_fini(&imem->base, suspend);
}
static int
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct gk20a_instmem_priv *priv;
+ struct gk20a_instmem *imem;
struct nouveau_platform_device *plat;
int ret;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+ *pobject = nv_object(imem);
if (ret)
return ret;
- spin_lock_init(&priv->lock);
+ spin_lock_init(&imem->lock);
plat = nv_device_to_platform(nv_device(parent));
if (plat->gpu->iommu.domain) {
- priv->domain = plat->gpu->iommu.domain;
- priv->mm = plat->gpu->iommu.mm;
- priv->iommu_pgshift = plat->gpu->iommu.pgshift;
- priv->mm_mutex = &plat->gpu->iommu.mutex;
+ imem->domain = plat->gpu->iommu.domain;
+ imem->mm = plat->gpu->iommu.mm;
+ imem->iommu_pgshift = plat->gpu->iommu.pgshift;
+ imem->mm_mutex = &plat->gpu->iommu.mutex;
- nv_info(priv, "using IOMMU\n");
+ nv_info(imem, "using IOMMU\n");
} else {
- init_dma_attrs(&priv->attrs);
+ init_dma_attrs(&imem->attrs);
/*
* We will access instmem through PRAMIN and thus do not need a
* consistent CPU pointer or kernel mapping
*/
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
- nv_info(priv, "using DMA API\n");
+ nv_info(imem, "using DMA API\n");
}
return 0;
static u32
nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- return nv_ro32(priv, node->mem->offset + addr);
+ struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+ struct nv04_instobj *node = (void *)object;
+ return nv_ro32(imem, node->mem->offset + addr);
}
static void
nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- nv_wo32(priv, node->mem->offset + addr, data);
+ struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+ struct nv04_instobj *node = (void *)object;
+ nv_wo32(imem, node->mem->offset + addr, data);
}
static void
nv04_instobj_dtor(struct nvkm_object *object)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv04_instobj_priv *node = (void *)object;
- struct nvkm_subdev *subdev = (void *)priv;
-
- mutex_lock(&subdev->mutex);
- nvkm_mm_free(&priv->heap, &node->mem);
- mutex_unlock(&subdev->mutex);
-
+ struct nv04_instmem *imem = (void *)nvkm_instmem(object);
+ struct nv04_instobj *node = (void *)object;
+ mutex_lock(&imem->base.subdev.mutex);
+ nvkm_mm_free(&imem->heap, &node->mem);
+ mutex_unlock(&imem->base.subdev.mutex);
nvkm_instobj_destroy(&node->base);
}
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
- struct nv04_instobj_priv *node;
+ struct nv04_instmem *imem = (void *)nvkm_instmem(parent);
+ struct nv04_instobj *node;
struct nvkm_instobj_args *args = data;
- struct nvkm_subdev *subdev = (void *)priv;
int ret;
if (!args->align)
args->align = 1;
if (ret)
return ret;
- mutex_lock(&subdev->mutex);
- ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
+ mutex_lock(&imem->base.subdev.mutex);
+ ret = nvkm_mm_head(&imem->heap, 0, 1, args->size, args->size,
args->align, &node->mem);
- mutex_unlock(&subdev->mutex);
+ mutex_unlock(&imem->base.subdev.mutex);
if (ret)
return ret;
void
nv04_instmem_dtor(struct nvkm_object *object)
{
- struct nv04_instmem_priv *priv = (void *)object;
- nvkm_gpuobj_ref(NULL, &priv->ramfc);
- nvkm_gpuobj_ref(NULL, &priv->ramro);
- nvkm_ramht_ref(NULL, &priv->ramht);
- nvkm_gpuobj_ref(NULL, &priv->vbios);
- nvkm_mm_fini(&priv->heap);
- if (priv->iomem)
- iounmap(priv->iomem);
- nvkm_instmem_destroy(&priv->base);
+ struct nv04_instmem *imem = (void *)object;
+ nvkm_gpuobj_ref(NULL, &imem->ramfc);
+ nvkm_gpuobj_ref(NULL, &imem->ramro);
+ nvkm_ramht_ref(NULL, &imem->ramht);
+ nvkm_gpuobj_ref(NULL, &imem->vbios);
+ nvkm_mm_fini(&imem->heap);
+ if (imem->iomem)
+ iounmap(imem->iomem);
+ nvkm_instmem_destroy(&imem->base);
}
static int
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv04_instmem_priv *priv;
+ struct nv04_instmem *imem;
int ret;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+ *pobject = nv_object(imem);
if (ret)
return ret;
/* PRAMIN aperture maps over the end of VRAM, reserve it */
- priv->base.reserved = 512 * 1024;
+ imem->base.reserved = 512 * 1024;
- ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+ &imem->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+ ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
if (ret)
return ret;
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00800, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
if (ret)
return ret;
/* 0x18800-0x18a00: reserve for RAMRO */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
- &priv->ramro);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00200, 0, 0,
+ &imem->ramro);
if (ret)
return ret;
extern struct nvkm_instobj_impl nv04_instobj_oclass;
-struct nv04_instmem_priv {
+struct nv04_instmem {
struct nvkm_instmem base;
void __iomem *iomem;
struct nvkm_gpuobj *ramfc;
};
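/* nv04_instmem() below wraps nvkm_instmem() and casts the result; the
 * cast is valid because 'struct nvkm_instmem base' is the first member
 * of struct nv04_instmem. */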
-static inline struct nv04_instmem_priv *
+static inline struct nv04_instmem *
nv04_instmem(void *obj)
{
return (void *)nvkm_instmem(obj);
}
-struct nv04_instobj_priv {
+struct nv04_instobj {
struct nvkm_instobj base;
struct nvkm_mm_node *mem;
};
static u32
nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
{
- struct nv04_instmem_priv *priv = (void *)object;
- return ioread32_native(priv->iomem + addr);
+ struct nv04_instmem *imem = (void *)object;
+ return ioread32_native(imem->iomem + addr);
}
static void
nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
- struct nv04_instmem_priv *priv = (void *)object;
- iowrite32_native(data, priv->iomem + addr);
+ struct nv04_instmem *imem = (void *)object;
+ iowrite32_native(data, imem->iomem + addr);
}
static int
struct nvkm_object **pobject)
{
struct nvkm_device *device = nv_device(parent);
- struct nv04_instmem_priv *priv;
+ struct nv04_instmem *imem;
int ret, bar, vs;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+ *pobject = nv_object(imem);
if (ret)
return ret;
else
bar = 3;
- priv->iomem = ioremap(nv_device_resource_start(device, bar),
+ imem->iomem = ioremap(nv_device_resource_start(device, bar),
nv_device_resource_len(device, bar));
- if (!priv->iomem) {
- nv_error(priv, "unable to map PRAMIN BAR\n");
+ if (!imem->iomem) {
+ nv_error(imem, "unable to map PRAMIN BAR\n");
return -EFAULT;
}
* to fit graphics contexts for every channel, the magics come
* from engine/gr/nv40.c
*/
- vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
- if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
- else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
- else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs;
- else priv->base.reserved = 0x4a40 * vs;
- priv->base.reserved += 16 * 1024;
- priv->base.reserved *= 32; /* per-channel */
- priv->base.reserved += 512 * 1024; /* pci(e)gart table */
- priv->base.reserved += 512 * 1024; /* object storage */
-
- priv->base.reserved = round_up(priv->base.reserved, 4096);
-
- ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+ vs = hweight8((nv_rd32(imem, 0x001540) & 0x0000ff00) >> 8);
+ if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+ else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
+ else if (nv44_gr_class(imem)) imem->base.reserved = 0x4980 * vs;
+ else imem->base.reserved = 0x4a40 * vs;
+ imem->base.reserved += 16 * 1024;
+ imem->base.reserved *= 32; /* per-channel */
+ imem->base.reserved += 512 * 1024; /* pci(e)gart table */
+ imem->base.reserved += 512 * 1024; /* object storage */
+
+ imem->base.reserved = round_up(imem->base.reserved, 4096);
+
+ ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
- &priv->vbios);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+ &imem->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+ ret = nvkm_ramht_new(nv_object(imem), NULL, 0x08000, 0, &imem->ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
- &priv->ramro);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x08000, 0, 0,
+ &imem->ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
- ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+ ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x20000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &imem->ramfc);
if (ret)
return ret;
#include <subdev/fb.h>
-struct nv50_instmem_priv {
+struct nv50_instmem {
struct nvkm_instmem base;
spinlock_t lock;
u64 addr;
};
-struct nv50_instobj_priv {
+struct nv50_instobj {
struct nvkm_instobj base;
struct nvkm_mem *mem;
};
static u32
nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
{
- struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv50_instobj_priv *node = (void *)object;
+ struct nv50_instmem *imem = (void *)nvkm_instmem(object);
+ struct nv50_instobj *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
u32 data;
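/* Same banked PRAMIN access as gk20a above: reprogram 0x001700 only
 * when the 1MiB window base changes, then read through 0x700000. */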
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ spin_lock_irqsave(&imem->lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nv_wr32(imem, 0x001700, base >> 16);
+ imem->addr = base;
}
- data = nv_rd32(priv, 0x700000 + addr);
- spin_unlock_irqrestore(&priv->lock, flags);
+ data = nv_rd32(imem, 0x700000 + addr);
+ spin_unlock_irqrestore(&imem->lock, flags);
return data;
}
static void
nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
{
- struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
- struct nv50_instobj_priv *node = (void *)object;
+ struct nv50_instmem *imem = (void *)nvkm_instmem(object);
+ struct nv50_instobj *node = (void *)object;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
- spin_lock_irqsave(&priv->lock, flags);
- if (unlikely(priv->addr != base)) {
- nv_wr32(priv, 0x001700, base >> 16);
- priv->addr = base;
+ spin_lock_irqsave(&imem->lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nv_wr32(imem, 0x001700, base >> 16);
+ imem->addr = base;
}
- nv_wr32(priv, 0x700000 + addr, data);
- spin_unlock_irqrestore(&priv->lock, flags);
+ nv_wr32(imem, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&imem->lock, flags);
}
static void
nv50_instobj_dtor(struct nvkm_object *object)
{
- struct nv50_instobj_priv *node = (void *)object;
+ struct nv50_instobj *node = (void *)object;
struct nvkm_fb *fb = nvkm_fb(object);
fb->ram->put(fb, &node->mem);
nvkm_instobj_destroy(&node->base);
{
struct nvkm_fb *fb = nvkm_fb(parent);
struct nvkm_instobj_args *args = data;
- struct nv50_instobj_priv *node;
+ struct nv50_instobj *node;
int ret;
args->size = max((args->size + 4095) & ~4095, (u32)4096);
static int
nv50_instmem_fini(struct nvkm_object *object, bool suspend)
{
- struct nv50_instmem_priv *priv = (void *)object;
- priv->addr = ~0ULL;
- return nvkm_instmem_fini(&priv->base, suspend);
+ struct nv50_instmem *imem = (void *)object;
+ imem->addr = ~0ULL;
+ return nvkm_instmem_fini(&imem->base, suspend);
}
static int
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- struct nv50_instmem_priv *priv;
+ struct nv50_instmem *imem;
int ret;
- ret = nvkm_instmem_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
+ ret = nvkm_instmem_create(parent, engine, oclass, &imem);
+ *pobject = nv_object(imem);
if (ret)
return ret;
- spin_lock_init(&priv->lock);
+ spin_lock_init(&imem->lock);
return 0;
}
#define nvkm_instmem_create(p,e,o,d) \
nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nvkm_instmem_destroy(p) \
- nvkm_subdev_destroy(&(p)->base)
+ nvkm_subdev_destroy(&(p)->subdev)
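/* The macro locals below are named _imem so they cannot shadow an
 * 'imem' variable at the macro call site. */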
#define nvkm_instmem_init(p) ({ \
- struct nvkm_instmem *imem = (p); \
- _nvkm_instmem_init(nv_object(imem)); \
+ struct nvkm_instmem *_imem = (p); \
+ _nvkm_instmem_init(nv_object(_imem)); \
})
#define nvkm_instmem_fini(p,s) ({ \
- struct nvkm_instmem *imem = (p); \
- _nvkm_instmem_fini(nv_object(imem), (s)); \
+ struct nvkm_instmem *_imem = (p); \
+ _nvkm_instmem_fini(nv_object(_imem), (s)); \
})
int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,