/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* bits that were flushed at the last commit, used to decide
	 * whether a vsync has happened since the last commit:
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

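/* note a pending flip/cursor update and arm the vblank irq; the irq
 * handler (mdp4_crtc_vblank_irq) consumes 'pending' and performs the
 * deferred work at vblank time:
 */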
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

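/* collect the flush bits for each pipe attached to this crtc plus the
 * overlay itself, and write them to OVERLAY_FLUSH so the double-buffered
 * register updates latch; the mask is remembered in flushed_mask so
 * mdp4_crtc_wait_for_flush_done() can poll for completion:
 */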
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on the file that requested the flip, then send
		 * the event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp4_crtc->event = NULL;
			DBG("%s: send event: %p", mdp4_crtc->name, event);
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

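/* flip-work callback: runs on the driver workqueue rather than from the
 * vblank irq, so it is safe here to drop the iova and bo references
 * taken in update_cursor()/mdp4_crtc_cursor_set():
 */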
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

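/* program the per-stage blend ops for this crtc's overlay: stages whose
 * pipe format carries an alpha channel blend with FG pixel alpha, the
 * rest fall back to constant alpha (FG fully opaque):
 */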
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

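	/* the external DMA channel additionally wants its quant matrix
	 * programmed; the magic value below is carried over from the
	 * original driver and is not otherwise documented here:
	 */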
	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);
	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// TODO no mdp4-specific properties to handle yet:
	return -EINVAL;
}

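/* largest cursor the mdp4 hw cursor supports (requests larger than this
 * are rejected in mdp4_crtc_cursor_set() below):
 */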
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

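/* just stash the new bo and mark the cursor stale; the actual register
 * writes are deferred to update_cursor() at vblank, since the cursor
 * registers are not double buffered (see the comment above update_cursor):
 */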
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width = width;
	mdp4_crtc->cursor.height = height;
	mdp4_crtc->cursor.stale = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.disable = mdp4_crtc_disable,
	.enable = mdp4_crtc_enable,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
};

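/* one-shot vblank handler: request_pending() registers this irq, and it
 * unregisters itself again on entry, so it runs once per requested
 * update:
 */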
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTC to wait for
	 * other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
			NULL);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}
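
/* Typical usage from the kms init path, as a rough sketch (the real call
 * site lives in mdp4_kms.c; the exact arguments there may differ):
 *
 *	plane = mdp4_plane_init(dev, RGB2, true);
 *	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
 *	priv->crtcs[priv->num_crtcs++] = crtc;
 */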