abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
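+	/* on kepler (NV_E0 and up) the ctxdma handles are repurposed to
+	 * select the engines the new channel will be bound to
+	 */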
+ if (device->card_type >= NV_E0) {
+ init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ init->tt_ctxdma_handle = 0;
+ }
+
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
return ret;
}
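+/* bind the kepler copy class to the NvSubCopy subchannel; writing the
+ * object handle to method 0x0000 is the usual subchannel bind on
+ * fermi-style channels
+ */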
+static int
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
}
void
-nouveau_bo_move_init(struct nouveau_channel *chan)
+nouveau_bo_move_init(struct nouveau_drm *drm)
{
- struct nouveau_cli *cli = chan->cli;
- struct nouveau_drm *drm = chan->drm;
static const struct {
const char *name;
int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
- { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
do {
struct nouveau_object *object;
+ struct nouveau_channel *chan;
u32 handle = (mthd->engine << 16) | mthd->oclass;
- ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
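+		/* the nve0 copy methods run on the dedicated copy-engine
+		 * channel; everything else uses the kernel gr channel
+		 */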
+ if (mthd->init == nve0_bo_move_init)
+ chan = drm->cechan;
+ else
+ chan = drm->channel;
+ if (chan == NULL)
+ continue;
+
+ ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
mthd->oclass, NULL, 0, &object);
if (ret == 0) {
ret = mthd->init(chan, handle);
if (ret) {
- nouveau_object_del(nv_object(cli),
+ nouveau_object_del(nv_object(drm),
chan->handle, handle);
continue;
}
extern struct ttm_bo_driver nouveau_bo_driver;
-void nouveau_bo_move_init(struct nouveau_channel *);
+void nouveau_bo_move_init(struct nouveau_drm *);
int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct nouveau_bo **);
int
nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, struct nouveau_channel **pchan)
+ u32 parent, u32 handle, u32 engine,
+ struct nouveau_channel **pchan)
{
static const u16 oclasses[] = { 0xa06f, 0x906f, 0x826f, 0x506f, 0 };
const u16 *oclass = oclasses;
args.pushbuf = chan->push.handle;
args.ioffset = 0x10000 + chan->push.vma.offset;
args.ilength = 0x02000;
- args.engine = NVE0_CHANNEL_IND_ENGINE_GR;
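+	/* the engine mask is only interpreted by the kepler (0xa06f)
+	 * class; the earlier ind classes take no engine argument
+	 */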
+ args.engine = engine;
do {
ret = nouveau_object_new(nv_object(cli), parent, handle,
struct nv_dma_class args;
int ret, i;
- chan->vram = vram;
- chan->gart = gart;
-
/* allocate dma objects to cover all allowed vram, and gart */
if (device->card_type < NV_C0) {
if (device->card_type >= NV_50) {
0x003d, &args, sizeof(args), &object);
if (ret)
return ret;
+
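+		/* record the ctxdma handles only when they were actually
+		 * created; on kepler these arguments carry engine masks
+		 * instead
+		 */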
+ chan->vram = vram;
+ chan->gart = gart;
}
/* initialise dma tracking parameters */
/* allocate software object class (used for fences on <= nv05, and
* to signal flip completion), bind it to a subchannel.
*/
- ret = nouveau_object_new(nv_object(client), chan->handle,
- NvSw, nouveau_abi16_swclass(chan->drm),
- NULL, 0, &object);
- if (ret)
- return ret;
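+	/* the copy-engine channel is used only for buffer moves and never
+	 * signals flips, so it can skip the software class entirely
+	 */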
+ if (chan != chan->drm->cechan) {
+ ret = nouveau_object_new(nv_object(client), chan->handle,
+ NvSw, nouveau_abi16_swclass(chan->drm),
+ NULL, 0, &object);
+ if (ret)
+ return ret;
- swch = (void *)object->parent;
- swch->flip = nouveau_flip_complete;
- swch->flip_data = chan;
+ swch = (void *)object->parent;
+ swch->flip = nouveau_flip_complete;
+ swch->flip_data = chan;
+ }
if (device->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
int
nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 vram, u32 gart,
+ u32 parent, u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **pchan)
{
int ret;
- ret = nouveau_channel_ind(drm, cli, parent, handle, pchan);
+ ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
if (ret) {
NV_DEBUG(drm, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
}
}
- ret = nouveau_channel_init(*pchan, vram, gart);
+ ret = nouveau_channel_init(*pchan, arg0, arg1);
if (ret) {
NV_ERROR(drm, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
- u32 parent, u32 handle, u32 vram, u32 gart,
+ u32 parent, u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
{
nouveau_gpuobj_ref(NULL, &drm->notify);
nouveau_channel_del(&drm->channel);
+ nouveau_channel_del(&drm->cechan);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
}
{
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_object *object;
+ u32 arg0, arg1;
int ret;
if (nouveau_noaccel)
return;
}
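+	/* on kepler, stand up an extra channel bound to the copy engines
+	 * so buffer moves can proceed asynchronously to the gr channel
+	 */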
+ if (device->card_type >= NV_E0) {
+ ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+ NVDRM_CHAN + 1,
+ NVE0_CHANNEL_IND_ENGINE_CE0 |
+ NVE0_CHANNEL_IND_ENGINE_CE1, 0,
+ &drm->cechan);
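+		/* non-fatal: without a cechan, nouveau_bo_move_init() will
+		 * fall back to the GRCE method on the gr channel
+		 */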
+ if (ret)
+ NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+ arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
+ arg1 = 0;
+ } else {
+ arg0 = NvDmaFB;
+ arg1 = NvDmaTT;
+ }
+
ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
- NvDmaFB, NvDmaTT, &drm->channel);
+ arg0, arg1, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
}
- nouveau_bo_move_init(drm->channel);
+ nouveau_bo_move_init(drm);
}
static int __devinit
void *fence;
/* context for accelerated drm-internal operations */
+ struct nouveau_channel *cechan;
struct nouveau_channel *channel;
struct nouveau_gpuobj *notify;
struct nouveau_fbdev *fbcon;