struct nouveau_vma tmp_vma;
u8 page_shift;
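+ /* compression tag space for this allocation (a block carved from the
+  * nv50 tag heap); NULL when the buffer object is not compressed */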
+ struct drm_mm_node *tag;
struct list_head regions;
dma_addr_t *pages;
u32 memtype;
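+ /* (tile_flags >> 8) now carries the compression mode in bits 8:9
+  * (NOUVEAU_GEM_TILE_COMP) on top of the storage type, hence the mask
+  * widens from 0xff to 0x3ff before it is handed to the vram backend */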
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0xff, &node);
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret)
return ret;
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
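+ /* re-use "delta" to accumulate the byte offset into the object as its
+  * regions are mapped; the nv50 backend uses it to address compression tags */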
+ delta = 0;
list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
end = max;
len = end - pte;
- vm->map(vma, pgt, node, pte, len, phys);
+ vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
pde++;
pte = 0;
}
+
+ delta += (u64)len << vma->node->type;
}
}
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
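+ /* map() gains an extra "delta": the byte offset into the object of the
+  * first PTE being written, used by nv50 to locate compression tags */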
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
dma_addr_t r100c08;
};
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
- kfree(priv);
+ nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
return -EFAULT;
}
- dev_priv->engine.fb.priv = priv;
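+ /* 0x100320 reports how much compression tag memory the board has;
+  * expose it via a drm_mm heap so nv50_vram_new() can sub-allocate tags */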
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%d tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
return 0;
}
void
nv50_fb_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
-
- priv = dev_priv->engine.fb.priv;
- if (!priv)
- return;
- dev_priv->engine.fb.priv = NULL;
-
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
}
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ u32 comp = (mem->memtype & 0x180) >> 7;
u32 block;
int i;
phys += block << (vma->node->type - 3);
cnt -= block;
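+ /* compressed objects consume "comp" tags per 64KiB; pick the tag for
+  * this block from the object's tag allocation using the byte offset */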
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
while (block) {
nv_wo32(pgt, pte + 0, offset_l);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
+
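+ /* give back any compression tag space along with the VRAM regions */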
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
mutex_unlock(&mm->mutex);
kfree(mem);
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 type, struct nouveau_mem **pmem)
+ u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
int ret;
if (!types[type])
if (!mem)
return -ENOMEM;
+ mutex_lock(&mm->mutex);
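+ /* compression needs a run of tags covering the whole object and is only
+  * attempted for 64KiB-aligned allocations (align == 16 pages); when no
+  * tag space is left, fall back to mapping the object uncompressed */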
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
INIT_LIST_HEAD(&mem->regions);
mem->dev = dev_priv->dev;
- mem->memtype = type;
+ mem->memtype = (comp << 7) | type;
mem->size = size;
- mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
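+ /* the extra "delta" argument keeps the interface in line with
+  * nv50_vm_map(); it is not used on this path */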
u32 next = 1 << (vma->node->type - 8);
INIT_LIST_HEAD(&mem->regions);
mem->dev = dev_priv->dev;
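+ /* the widened memtype can now carry nv50 compression bits; nvc0 keeps
+  * only the low byte (the storage type) */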
- mem->memtype = type;
+ mem->memtype = (type & 0xff);
mem->size = size;
mutex_lock(&mm->mutex);
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
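+/* TILE_COMP occupies bits 16:17 of tile_flags; after the >> 8 applied in
+ * the vram manager they become memtype bits 8:9, which nv50_vram_new()
+ * decodes as the compression mode */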