/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
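
/*
 * LM hw cursors are limited to 64x64 ARGB8888; larger requests are
 * rejected in mdp5_crtc_cursor_set()
 */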
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
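
/*
 * Set a PENDING_* bit and arm the vblank irq: mdp5_crtc_vblank_irq()
 * consumes these bits once the hw has latched the update, which is how
 * deferred work (sending the flip event, unref'ing the old cursor bo)
 * gets scheduled.
 */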
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}
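
/*
 * command-mode pipelines signal frame completion with the PP_DONE irq
 * instead of vblank; the completion re-armed above is what
 * mdp5_crtc_wait_for_pp_done() blocks on after a commit
 */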

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

	DBG("%s: flush=%08x", crtc->name, flush_mask);
	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (i.e. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
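
/*
 * map a blend stage to the LM_BLEND_COLOR_OUT bit that selects
 * foreground (per-pixel) alpha for that stage
 */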
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
enum {
	PIPE_LEFT = 0,
	PIPE_RIGHT,
	MAX_PIPE_STAGE,
};

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane comprises two
		 * hwpipes), then stage the right pipe on the right side of
		 * both layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Program the blend ops/alpha for each stage in use */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
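
		/*
		 * the branches below pick the blend equation:
		 *  - premultiplied fb:   dst = fg + (1 - fg.alpha) * bg
		 *  - per-pixel alpha fb: dst = fg.alpha * fg + (1 - fg.alpha) * bg
		 *  - no pixel alpha:     constant fg/bg alpha (set up above)
		 * a plane alpha != 0xff additionally modulates the pixel
		 * alpha with the constant alpha
		 */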
		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);
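
	/*
	 * in source-split mode the adjusted mode spans both layer mixers,
	 * so each LM is programmed with half of the total width
	 */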
	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
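
/*
 * called from the CRTC's atomic check: (re)assign hw mixers to this
 * CRTC and derive the irq masks from the interface that the encoder's
 * atomic check already picked
 */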
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
			  struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
		    pstates[i].plane->name,
		    pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is actually read and rendered. The ROI is determined
	 * by the visibility of the cursor point. In the default cursor
	 * image the cursor point is at the top left of the cursor image,
	 * unless specified otherwise via the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width)
	 * or bottom (yres - y < cursor.height) boundary of the screen,
	 * then the ROI width and ROI height need to be evaluated to crop
	 * the cursor image accordingly.
	 * (xres-x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be the new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
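
/*
 * e.g. a 64x64 cursor at x = 1900 on a 1920-wide mode: xres - x = 20,
 * so roi_w becomes 20 and only the leftmost 20 columns of the cursor
 * image are fetched
 */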

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
				struct drm_file *file, uint32_t handle,
				uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, stride;
	uint64_t cursor_addr;
	struct mdp5_ctl *ctl;
	uint32_t lm;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_cstate->pipeline.mixer->lm;
	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}
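
/*
 * reset/duplicate/destroy are open-coded rather than using the generic
 * atomic helpers because the CRTC state is subclassed: struct
 * mdp5_crtc_state embeds drm_crtc_state, so the full structure must be
 * (de)allocated
 */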

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
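
/*
 * same as mdp5_crtc_funcs minus cursor_set/cursor_move: when the CRTC
 * gets a cursor drm_plane, the core routes the legacy cursor ioctls
 * through that plane instead of the LM cursor registers
 */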
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		  mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
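
/*
 * the wait above is bounded at 50ms so a wedged pipe only stalls,
 * rather than hangs, the commit path; our flush bits dropping out of
 * the commit status indicates the hw consumed the flush at vblank
 */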

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	if (cursor_plane)
		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
					  &mdp5_crtc_no_lm_cursor_funcs, NULL);
	else
		drm_crtc_init_with_planes(dev, crtc, plane, NULL,
					  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}