}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
}
} else {
if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
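The hunk above is the substance of the fix: a buffer that may be placed in GART (TTM_PL_FLAG_TT) is kept on small pages, since on these chips the GPU's large page size can only back VRAM placements. A minimal sketch of the selection, with a hypothetical helper name and illustrative shift values (lpg_shift/spg_shift come from the channel VM in the real code):

static int pick_page_shift(u32 flags, int size,
			   int lpg_shift, int spg_shift)
{
	/* GART-eligible buffers must stay on small pages */
	if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
		return lpg_shift;	/* large pages, e.g. 16 -> 64KiB */
	return spg_shift;		/* small pages, e.g. 12 -> 4KiB */
}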
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
if (dev_priv->chan_vm) {
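nouveau_bo_new() now forwards the TTM placement flags it was given (derived from the requested GEM domains), so the fixup can tell GART-eligible buffers apart from VRAM-only ones. Once the page shift is chosen, the fixup pads size and alignment to that page size; a self-contained sketch of that rounding (not the driver code verbatim):

/* pad size/alignment to the selected page size, as the nv50+ path
 * of nouveau_bo_fixup_align() does */
static void pad_to_page(int *size, int *align, int page_shift)
{
	int page = 1 << page_shift;

	*size = (*size + page - 1) & ~(page - 1);	/* roundup */
	if (*align < page)
		*align = page;				/* max */
}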
int size, int align, uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
return ret;
nvbo = *pnvbo;
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ if (dev_priv->card_type >= NV_50)
+ nvbo->valid_domains &= domain;
+
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
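The new valid_domains mask snapshots, on nv50+, the domains userspace asked for at creation time; earlier chips keep both VRAM and GART, so the existing ABI is untouched. A standalone illustration of the effect (domain values as defined in nouveau_drm.h):

#include <assert.h>
#include <stdint.h>

#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)	/* as in nouveau_drm.h */
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)

int main(void)
{
	uint32_t valid = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;

	/* nv50+: restrict to the creation-time domain, VRAM-only here */
	valid &= NOUVEAU_GEM_DOMAIN_VRAM;

	/* a later request to place this object in GART is filtered out */
	assert(!(valid & NOUVEAU_GEM_DOMAIN_GART));
	return 0;
}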
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint32_t domains = valid_domains &
+ uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
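With the extra AND against nvbo->valid_domains, set_domain can only ever choose a placement the object was created with. If userspace asks for a domain outside that set, the computed mask comes out empty and, in this era of the driver, the check that immediately follows rejects the request:

	if (!domains)
		return -EINVAL;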