2 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
4 * Copyright (c) 2013 Texas Instruments Inc.
5 * David Griego, <dagriego@biglakesoftware.com>
6 * Dale Farnsworth, <dale@farnsworth.org>
7 * Archit Taneja, <archit@ti.com>
9 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
10 * Pawel Osciak, <pawel@osciak.com>
11 * Marek Szyprowski, <m.szyprowski@samsung.com>
13 * Based on the virtual v4l2-mem2mem example device
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License version 2 as published by
17 * the Free Software Foundation
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/err.h>
24 #include <linux/interrupt.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/sched.h>
31 #include <linux/slab.h>
32 #include <linux/videodev2.h>
33 #include <linux/log2.h>
34 #include <linux/sizes.h>
36 #include <media/v4l2-common.h>
37 #include <media/v4l2-ctrls.h>
38 #include <media/v4l2-device.h>
39 #include <media/v4l2-event.h>
40 #include <media/v4l2-ioctl.h>
41 #include <media/v4l2-mem2mem.h>
42 #include <media/videobuf2-core.h>
43 #include <media/videobuf2-dma-contig.h>
50 #define VPE_MODULE_NAME "vpe"
52 /* minimum and maximum frame sizes */
58 /* required alignments */
59 #define S_ALIGN 0 /* multiple of 1 */
60 #define H_ALIGN 1 /* multiple of 2 */
62 /* flags that indicate a format can be used for capture/output */
63 #define VPE_FMT_TYPE_CAPTURE (1 << 0)
64 #define VPE_FMT_TYPE_OUTPUT (1 << 1)
66 /* used as plane indices */
67 #define VPE_MAX_PLANES 2
71 /* per m2m context info */
72 #define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */
74 #define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
* each VPE context can need up to 3 config descriptors, 7 input descriptors,
78 * 3 output descriptors, and 10 control descriptors
80 #define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
81 13 * VPDMA_CFD_CTD_DESC_SIZE)
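/* i.e. 7 input + 3 output data descriptors, plus 3 config + 10 control descriptors */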
83 #define vpe_dbg(vpedev, fmt, arg...) \
84 dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
85 #define vpe_err(vpedev, fmt, arg...) \
86 dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
88 struct vpe_us_coeffs {
89 unsigned short anchor_fid0_c0;
90 unsigned short anchor_fid0_c1;
91 unsigned short anchor_fid0_c2;
92 unsigned short anchor_fid0_c3;
93 unsigned short interp_fid0_c0;
94 unsigned short interp_fid0_c1;
95 unsigned short interp_fid0_c2;
96 unsigned short interp_fid0_c3;
97 unsigned short anchor_fid1_c0;
98 unsigned short anchor_fid1_c1;
99 unsigned short anchor_fid1_c2;
100 unsigned short anchor_fid1_c3;
101 unsigned short interp_fid1_c0;
102 unsigned short interp_fid1_c1;
103 unsigned short interp_fid1_c2;
104 unsigned short interp_fid1_c3;
108 * Default upsampler coefficients
110 static const struct vpe_us_coeffs us_coeffs[] = {
112 /* Coefficients for progressive input */
113 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
114 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
117 /* Coefficients for Top Field Interlaced input */
118 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
119 /* Coefficients for Bottom Field Interlaced input */
120 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
125 * the following registers are for configuring some of the parameters of the
* motion and edge detection blocks inside DEI. These generally remain the
* same, but they could be exposed to userspace later if someone needs to
* tweak them.
129 struct vpe_dei_regs {
130 unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */
131 unsigned long edi_config_reg; /* VPE_DEI_REG3 */
132 unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */
133 unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */
134 unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */
135 unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */
139 * default expert DEI register values, unlikely to be modified.
141 static const struct vpe_dei_regs dei_regs = {
142 .mdt_spacial_freq_thr_reg = 0x020C0804u,
143 .edi_config_reg = 0x0118100Fu,
144 .edi_lut_reg0 = 0x08040200u,
145 .edi_lut_reg1 = 0x1010100Cu,
146 .edi_lut_reg2 = 0x10101010u,
147 .edi_lut_reg3 = 0x10101010u,
151 * The port_data structure contains per-port data.
153 struct vpe_port_data {
154 enum vpdma_channel channel; /* VPDMA channel */
155 u8 vb_index; /* input frame f, f-1, f-2 index */
u8 vb_part; /* plane index for co-planar formats */
160 * Define indices into the port_data tables
162 #define VPE_PORT_LUMA1_IN 0
163 #define VPE_PORT_CHROMA1_IN 1
164 #define VPE_PORT_LUMA2_IN 2
165 #define VPE_PORT_CHROMA2_IN 3
166 #define VPE_PORT_LUMA3_IN 4
167 #define VPE_PORT_CHROMA3_IN 5
168 #define VPE_PORT_MV_IN 6
169 #define VPE_PORT_MV_OUT 7
170 #define VPE_PORT_LUMA_OUT 8
171 #define VPE_PORT_CHROMA_OUT 9
172 #define VPE_PORT_RGB_OUT 10
174 static const struct vpe_port_data port_data[11] = {
175 [VPE_PORT_LUMA1_IN] = {
176 .channel = VPE_CHAN_LUMA1_IN,
180 [VPE_PORT_CHROMA1_IN] = {
181 .channel = VPE_CHAN_CHROMA1_IN,
183 .vb_part = VPE_CHROMA,
185 [VPE_PORT_LUMA2_IN] = {
186 .channel = VPE_CHAN_LUMA2_IN,
190 [VPE_PORT_CHROMA2_IN] = {
191 .channel = VPE_CHAN_CHROMA2_IN,
193 .vb_part = VPE_CHROMA,
195 [VPE_PORT_LUMA3_IN] = {
196 .channel = VPE_CHAN_LUMA3_IN,
200 [VPE_PORT_CHROMA3_IN] = {
201 .channel = VPE_CHAN_CHROMA3_IN,
203 .vb_part = VPE_CHROMA,
206 .channel = VPE_CHAN_MV_IN,
208 [VPE_PORT_MV_OUT] = {
209 .channel = VPE_CHAN_MV_OUT,
211 [VPE_PORT_LUMA_OUT] = {
212 .channel = VPE_CHAN_LUMA_OUT,
215 [VPE_PORT_CHROMA_OUT] = {
216 .channel = VPE_CHAN_CHROMA_OUT,
217 .vb_part = VPE_CHROMA,
219 [VPE_PORT_RGB_OUT] = {
220 .channel = VPE_CHAN_RGB_OUT,
226 /* driver info for each of the supported video formats */
228 char *name; /* human-readable name */
229 u32 fourcc; /* standard format identifier */
230 u8 types; /* CAPTURE and/or OUTPUT */
231 u8 coplanar; /* set for unpacked Luma and Chroma */
232 /* vpdma format info for each plane */
233 struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
236 static struct vpe_fmt vpe_formats[] = {
238 .name = "YUV 422 co-planar",
239 .fourcc = V4L2_PIX_FMT_NV16,
240 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
242 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
243 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
247 .name = "YUV 420 co-planar",
248 .fourcc = V4L2_PIX_FMT_NV12,
249 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
251 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
252 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
256 .name = "YUYV 422 packed",
257 .fourcc = V4L2_PIX_FMT_YUYV,
258 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
260 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
264 .name = "UYVY 422 packed",
265 .fourcc = V4L2_PIX_FMT_UYVY,
266 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
268 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
272 .name = "RGB888 packed",
273 .fourcc = V4L2_PIX_FMT_RGB24,
274 .types = VPE_FMT_TYPE_CAPTURE,
276 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
281 .fourcc = V4L2_PIX_FMT_RGB32,
282 .types = VPE_FMT_TYPE_CAPTURE,
284 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
288 .name = "BGR888 packed",
289 .fourcc = V4L2_PIX_FMT_BGR24,
290 .types = VPE_FMT_TYPE_CAPTURE,
292 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
297 .fourcc = V4L2_PIX_FMT_BGR32,
298 .types = VPE_FMT_TYPE_CAPTURE,
300 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
306 * per-queue, driver-specific private data.
307 * there is one source queue and one destination queue for each m2m context.
310 unsigned int width; /* frame width */
311 unsigned int height; /* frame height */
312 unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
313 enum v4l2_colorspace colorspace;
314 enum v4l2_field field; /* supported field value */
316 unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
317 struct v4l2_rect c_rect; /* crop/compose rectangle */
318 struct vpe_fmt *fmt; /* format info */
321 /* vpe_q_data flag bits */
322 #define Q_DATA_FRAME_1D (1 << 0)
323 #define Q_DATA_MODE_TILED (1 << 1)
324 #define Q_DATA_INTERLACED (1 << 2)
331 /* find our format description corresponding to the passed v4l2_format */
332 static struct vpe_fmt *find_format(struct v4l2_format *f)
337 for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
338 fmt = &vpe_formats[k];
339 if (fmt->fourcc == f->fmt.pix.pixelformat)
* there is one vpe_dev structure in the driver; it is shared by all instances
351 struct v4l2_device v4l2_dev;
352 struct video_device vfd;
353 struct v4l2_m2m_dev *m2m_dev;
355 atomic_t num_instances; /* count of driver instances */
356 dma_addr_t loaded_mmrs; /* shadow mmrs in device */
357 struct mutex dev_mutex;
362 struct resource *res;
364 struct vb2_alloc_ctx *alloc_ctx;
365 struct vpdma_data *vpdma; /* vpdma data handle */
366 struct sc_data *sc; /* scaler data handle */
367 struct csc_data *csc; /* csc data handle */
371 * There is one vpe_ctx structure for each m2m context.
376 struct v4l2_m2m_ctx *m2m_ctx;
377 struct v4l2_ctrl_handler hdl;
379 unsigned int field; /* current field */
380 unsigned int sequence; /* current frame/field seq */
381 unsigned int aborting; /* abort after next irq */
383 unsigned int bufs_per_job; /* input buffers per batch */
384 unsigned int bufs_completed; /* bufs done in this batch */
386 struct vpe_q_data q_data[2]; /* src & dst queue data */
387 struct vb2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
388 struct vb2_buffer *dst_vb;
390 dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
391 void *mv_buf[2]; /* virtual addrs of motion vector bufs */
392 size_t mv_buf_size; /* current motion vector buffer size */
393 struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
394 struct vpdma_buf sc_coeff_h; /* h coeff buffer */
395 struct vpdma_buf sc_coeff_v; /* v coeff buffer */
396 struct vpdma_desc_list desc_list; /* DMA descriptor list */
398 bool deinterlacing; /* using de-interlacer */
399 bool load_mmrs; /* have new shadow reg values */
401 unsigned int src_mv_buf_selector;
406 * M2M devices get 2 queues.
407 * Return the queue given the type.
409 static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
410 enum v4l2_buf_type type)
413 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
414 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
415 return &ctx->q_data[Q_DATA_SRC];
416 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
417 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
418 return &ctx->q_data[Q_DATA_DST];
425 static u32 read_reg(struct vpe_dev *dev, int offset)
427 return ioread32(dev->base + offset);
430 static void write_reg(struct vpe_dev *dev, int offset, u32 value)
432 iowrite32(value, dev->base + offset);
435 /* register field read/write helpers */
436 static int get_field(u32 value, u32 mask, int shift)
438 return (value & (mask << shift)) >> shift;
441 static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
443 return get_field(read_reg(dev, offset), mask, shift);
446 static void write_field(u32 *valp, u32 field, u32 mask, int shift)
450 val &= ~(mask << shift);
451 val |= (field & mask) << shift;
455 static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
458 u32 val = read_reg(dev, offset);
460 write_field(&val, field, mask, shift);
462 write_reg(dev, offset, val);
466 * DMA address/data block for the shadow registers
469 struct vpdma_adb_hdr out_fmt_hdr;
472 struct vpdma_adb_hdr us1_hdr;
474 struct vpdma_adb_hdr us2_hdr;
476 struct vpdma_adb_hdr us3_hdr;
478 struct vpdma_adb_hdr dei_hdr;
480 struct vpdma_adb_hdr sc_hdr0;
483 struct vpdma_adb_hdr sc_hdr8;
486 struct vpdma_adb_hdr sc_hdr17;
489 struct vpdma_adb_hdr csc_hdr;
494 #define GET_OFFSET_TOP(ctx, obj, reg) \
495 ((obj)->res->start - ctx->dev->res->start + reg)
497 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
498 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
500 * Set the headers for all of the address/data block structures.
502 static void init_adb_hdrs(struct vpe_ctx *ctx)
504 VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
505 VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
506 VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
507 VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
508 VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
509 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
510 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
511 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
512 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
513 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
514 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
515 VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
516 GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
520 * Allocate or re-allocate the motion vector DMA buffers
521 * There are two buffers, one for input and one for output.
522 * However, the roles are reversed after each field is processed.
523 * In other words, after each field is processed, the previous
524 * output (dst) MV buffer becomes the new input (src) MV buffer.
526 static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
528 struct device *dev = ctx->dev->v4l2_dev.dev;
530 if (ctx->mv_buf_size == size)
534 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
538 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
544 ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
546 if (!ctx->mv_buf[0]) {
547 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
551 ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
553 if (!ctx->mv_buf[1]) {
554 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
555 dma_free_coherent(dev, size, ctx->mv_buf[0],
561 ctx->mv_buf_size = size;
562 ctx->src_mv_buf_selector = 0;
567 static void free_mv_buffers(struct vpe_ctx *ctx)
569 realloc_mv_buffers(ctx, 0);
573 * While de-interlacing, we keep the two most recent input buffers
574 * around. This function frees those two buffers when we have
575 * finished processing the current stream.
577 static void free_vbs(struct vpe_ctx *ctx)
579 struct vpe_dev *dev = ctx->dev;
582 if (ctx->src_vbs[2] == NULL)
585 spin_lock_irqsave(&dev->lock, flags);
586 if (ctx->src_vbs[2]) {
587 v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
588 v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
590 spin_unlock_irqrestore(&dev->lock, flags);
594 * Enable or disable the VPE clocks
596 static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
601 val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
602 write_reg(dev, VPE_CLK_ENABLE, val);
605 static void vpe_top_reset(struct vpe_dev *dev)
608 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
609 VPE_DATA_PATH_CLK_RESET_SHIFT);
611 usleep_range(100, 150);
613 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
614 VPE_DATA_PATH_CLK_RESET_SHIFT);
617 static void vpe_top_vpdma_reset(struct vpe_dev *dev)
619 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
620 VPE_VPDMA_CLK_RESET_SHIFT);
622 usleep_range(100, 150);
624 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
625 VPE_VPDMA_CLK_RESET_SHIFT);
* Load the correct set of upsampler coefficients into the shadow MMRs
631 static void set_us_coefficients(struct vpe_ctx *ctx)
633 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
634 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
635 u32 *us1_reg = &mmr_adb->us1_regs[0];
636 u32 *us2_reg = &mmr_adb->us2_regs[0];
637 u32 *us3_reg = &mmr_adb->us3_regs[0];
638 const unsigned short *cp, *end_cp;
640 cp = &us_coeffs[0].anchor_fid0_c0;
642 if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
643 cp += sizeof(us_coeffs[0]) / sizeof(*cp);
645 end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
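/* each register packs two 16-bit coefficients; US2 and US3 get copies of the US1 values */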
647 while (cp < end_cp) {
648 write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
649 write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
650 *us2_reg++ = *us1_reg;
651 *us3_reg++ = *us1_reg++;
653 ctx->load_mmrs = true;
657 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
659 static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
661 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
662 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
663 u32 *us1_reg0 = &mmr_adb->us1_regs[0];
664 u32 *us2_reg0 = &mmr_adb->us2_regs[0];
665 u32 *us3_reg0 = &mmr_adb->us3_regs[0];
670 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
671 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
674 if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
676 line_mode = 0; /* double lines to line buffer */
679 write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
680 write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
681 write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
684 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
685 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
686 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
688 /* frame start for input luma */
689 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
691 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
693 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
696 /* frame start for input chroma */
697 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
698 VPE_CHAN_CHROMA1_IN);
699 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
700 VPE_CHAN_CHROMA2_IN);
701 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
702 VPE_CHAN_CHROMA3_IN);
704 /* frame start for MV in client */
705 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
708 ctx->load_mmrs = true;
* Set the shadow registers that are modified when the source format changes
715 static void set_src_registers(struct vpe_ctx *ctx)
717 set_us_coefficients(ctx);
* Set the shadow registers that are modified when the destination format changes
724 static void set_dst_registers(struct vpe_ctx *ctx)
726 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
727 enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
728 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
731 if (clrspc == V4L2_COLORSPACE_SRGB)
732 val |= VPE_RGB_OUT_SELECT;
733 else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
734 val |= VPE_COLOR_SEPARATE_422;
737 * the source of CHR_DS and CSC is always the scaler, irrespective of
738 * whether it's used or not
740 val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
742 if (fmt->fourcc != V4L2_PIX_FMT_NV12)
743 val |= VPE_DS_BYPASS;
745 mmr_adb->out_fmt_reg[0] = val;
747 ctx->load_mmrs = true;
751 * Set the de-interlacer shadow register values
753 static void set_dei_regs(struct vpe_ctx *ctx)
755 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
756 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
757 unsigned int src_h = s_q_data->c_rect.height;
758 unsigned int src_w = s_q_data->c_rect.width;
759 u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
760 bool deinterlace = true;
* according to the TRM, we should set DEI in progressive bypass mode when the
* input content is progressive. However, DEI is bypassed correctly for both
* progressive and interlaced content in interlace bypass mode, and it has
* been recommended not to use progressive bypass mode.
769 if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
770 !(s_q_data->flags & Q_DATA_INTERLACED)) {
772 val = VPE_DEI_INTERLACE_BYPASS;
775 src_h = deinterlace ? src_h * 2 : src_h;
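/* the DEI frame size is programmed in frame lines: twice the field height when de-interlacing */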
777 val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
778 (src_w << VPE_DEI_WIDTH_SHIFT) |
783 ctx->load_mmrs = true;
786 static void set_dei_shadow_registers(struct vpe_ctx *ctx)
788 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
789 u32 *dei_mmr = &mmr_adb->dei_regs[0];
790 const struct vpe_dei_regs *cur = &dei_regs;
792 dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
793 dei_mmr[3] = cur->edi_config_reg;
794 dei_mmr[4] = cur->edi_lut_reg0;
795 dei_mmr[5] = cur->edi_lut_reg1;
796 dei_mmr[6] = cur->edi_lut_reg2;
797 dei_mmr[7] = cur->edi_lut_reg3;
799 ctx->load_mmrs = true;
803 * Set the shadow registers whose values are modified when either the
804 * source or destination format is changed.
806 static int set_srcdst_params(struct vpe_ctx *ctx)
808 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
809 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
810 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
811 unsigned int src_w = s_q_data->c_rect.width;
812 unsigned int src_h = s_q_data->c_rect.height;
813 unsigned int dst_w = d_q_data->c_rect.width;
814 unsigned int dst_h = d_q_data->c_rect.height;
819 ctx->field = V4L2_FIELD_TOP;
821 if ((s_q_data->flags & Q_DATA_INTERLACED) &&
822 !(d_q_data->flags & Q_DATA_INTERLACED)) {
824 const struct vpdma_data_format *mv =
825 &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
* we make sure that the source image has a 16 byte aligned stride; we need
* to do the same for the motion vector buffer by aligning its stride to the
* next 16 byte boundary. This extra space will not be used by the
* de-interlacer, but it ensures that VPDMA operates correctly
834 bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
836 mv_buf_size = bytes_per_line * s_q_data->height;
838 ctx->deinterlacing = true;
841 ctx->deinterlacing = false;
847 ret = realloc_mv_buffers(ctx, mv_buf_size);
851 set_cfg_and_line_modes(ctx);
854 csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
855 s_q_data->colorspace, d_q_data->colorspace);
857 sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
858 sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
860 sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
861 &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
862 src_w, src_h, dst_w, dst_h);
868 * Return the vpe_ctx structure for a given struct file
870 static struct vpe_ctx *file2ctx(struct file *file)
872 return container_of(file->private_data, struct vpe_ctx, fh);
880 * job_ready() - check whether an instance is ready to be scheduled to run
882 static int job_ready(void *priv)
884 struct vpe_ctx *ctx = priv;
885 int needed = ctx->bufs_per_job;
887 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
888 needed += 2; /* need additional two most recent fields */
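/* two extra source buffers are needed to prime the f-1/f-2 field history */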
890 if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
893 if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
899 static void job_abort(void *priv)
901 struct vpe_ctx *ctx = priv;
903 /* Will cancel the transaction in the next interrupt handler */
908 * Lock access to the device
910 static void vpe_lock(void *priv)
912 struct vpe_ctx *ctx = priv;
913 struct vpe_dev *dev = ctx->dev;
914 mutex_lock(&dev->dev_mutex);
917 static void vpe_unlock(void *priv)
919 struct vpe_ctx *ctx = priv;
920 struct vpe_dev *dev = ctx->dev;
921 mutex_unlock(&dev->dev_mutex);
924 static void vpe_dump_regs(struct vpe_dev *dev)
926 #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
928 vpe_dbg(dev, "VPE Registers:\n");
932 DUMPREG(INT0_STATUS0_RAW);
933 DUMPREG(INT0_STATUS0);
934 DUMPREG(INT0_ENABLE0);
935 DUMPREG(INT0_STATUS1_RAW);
936 DUMPREG(INT0_STATUS1);
937 DUMPREG(INT0_ENABLE1);
940 DUMPREG(CLK_FORMAT_SELECT);
941 DUMPREG(CLK_RANGE_MAP);
966 DUMPREG(DEI_FRAME_SIZE);
968 DUMPREG(MDT_SF_THRESHOLD);
970 DUMPREG(DEI_EDI_LUT_R0);
971 DUMPREG(DEI_EDI_LUT_R1);
972 DUMPREG(DEI_EDI_LUT_R2);
973 DUMPREG(DEI_EDI_LUT_R3);
974 DUMPREG(DEI_FMD_WINDOW_R0);
975 DUMPREG(DEI_FMD_WINDOW_R1);
976 DUMPREG(DEI_FMD_CONTROL_R0);
977 DUMPREG(DEI_FMD_CONTROL_R1);
978 DUMPREG(DEI_FMD_STATUS_R0);
979 DUMPREG(DEI_FMD_STATUS_R1);
980 DUMPREG(DEI_FMD_STATUS_R2);
983 sc_dump_regs(dev->sc);
984 csc_dump_regs(dev->csc);
987 static void add_out_dtd(struct vpe_ctx *ctx, int port)
989 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
990 const struct vpe_port_data *p_data = &port_data[port];
991 struct vb2_buffer *vb = ctx->dst_vb;
992 struct vpe_fmt *fmt = q_data->fmt;
993 const struct vpdma_data_format *vpdma_fmt;
994 int mv_buf_selector = !ctx->src_mv_buf_selector;
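/* the MV output is written to the buffer not currently used as the MV input (ping-pong) */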
998 if (port == VPE_PORT_MV_OUT) {
999 vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1000 dma_addr = ctx->mv_buf_dma[mv_buf_selector];
/* for non-coplanar (packed) formats, only plane 0 is used */
1003 int plane = fmt->coplanar ? p_data->vb_part : 0;
1005 vpdma_fmt = fmt->vpdma_fmt[plane];
1006 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1009 "acquiring output buffer(%d) dma_addr failed\n",
1015 if (q_data->flags & Q_DATA_FRAME_1D)
1016 flags |= VPDMA_DATA_FRAME_1D;
1017 if (q_data->flags & Q_DATA_MODE_TILED)
1018 flags |= VPDMA_DATA_MODE_TILED;
1020 vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
1021 vpdma_fmt, dma_addr, p_data->channel, flags);
1024 static void add_in_dtd(struct vpe_ctx *ctx, int port)
1026 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
1027 const struct vpe_port_data *p_data = &port_data[port];
1028 struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
1029 struct vpe_fmt *fmt = q_data->fmt;
1030 const struct vpdma_data_format *vpdma_fmt;
1031 int mv_buf_selector = ctx->src_mv_buf_selector;
1032 int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
1033 int frame_width, frame_height;
1034 dma_addr_t dma_addr;
1037 if (port == VPE_PORT_MV_IN) {
1038 vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1039 dma_addr = ctx->mv_buf_dma[mv_buf_selector];
/* for non-coplanar (packed) formats, only plane 0 is used */
1042 int plane = fmt->coplanar ? p_data->vb_part : 0;
1044 vpdma_fmt = fmt->vpdma_fmt[plane];
1046 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1049 "acquiring input buffer(%d) dma_addr failed\n",
1055 if (q_data->flags & Q_DATA_FRAME_1D)
1056 flags |= VPDMA_DATA_FRAME_1D;
1057 if (q_data->flags & Q_DATA_MODE_TILED)
1058 flags |= VPDMA_DATA_MODE_TILED;
1060 frame_width = q_data->c_rect.width;
1061 frame_height = q_data->c_rect.height;
1063 if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
1066 vpdma_add_in_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
1067 vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
1068 frame_height, 0, 0);
1072 * Enable the expected IRQ sources
1074 static void enable_irqs(struct vpe_ctx *ctx)
1076 write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
1077 write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
1078 VPE_DS1_UV_ERROR_INT);
1080 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
1083 static void disable_irqs(struct vpe_ctx *ctx)
1085 write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
1086 write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
1088 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
1091 /* device_run() - prepares and starts the device
1093 * This function is only called when both the source and destination
1094 * buffers are in place.
1096 static void device_run(void *priv)
1098 struct vpe_ctx *ctx = priv;
1099 struct sc_data *sc = ctx->dev->sc;
1100 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
1102 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
1103 ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
1104 WARN_ON(ctx->src_vbs[2] == NULL);
1105 ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
1106 WARN_ON(ctx->src_vbs[1] == NULL);
1109 ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
1110 WARN_ON(ctx->src_vbs[0] == NULL);
1111 ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
1112 WARN_ON(ctx->dst_vb == NULL);
1114 /* config descriptors */
1115 if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
1116 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
1117 vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
1118 ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
1119 ctx->load_mmrs = false;
1122 if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
1124 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
1125 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1126 &ctx->sc_coeff_h, 0);
1128 sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
1129 sc->load_coeff_h = false;
1132 if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
1134 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
1135 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1136 &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
1138 sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
1139 sc->load_coeff_v = false;
1142 /* output data descriptors */
1143 if (ctx->deinterlacing)
1144 add_out_dtd(ctx, VPE_PORT_MV_OUT);
1146 if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
1147 add_out_dtd(ctx, VPE_PORT_RGB_OUT);
1149 add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
1150 if (d_q_data->fmt->coplanar)
1151 add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
1154 /* input data descriptors */
1155 if (ctx->deinterlacing) {
1156 add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
1157 add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
1159 add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
1160 add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
1163 add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
1164 add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
1166 if (ctx->deinterlacing)
1167 add_in_dtd(ctx, VPE_PORT_MV_IN);
1169 /* sync on channel control descriptors for input ports */
1170 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
1171 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
1173 if (ctx->deinterlacing) {
1174 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1176 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1177 VPE_CHAN_CHROMA2_IN);
1179 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1181 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1182 VPE_CHAN_CHROMA3_IN);
1184 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
1187 /* sync on channel control descriptors for output ports */
1188 if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
1189 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1192 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1194 if (d_q_data->fmt->coplanar)
1195 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1196 VPE_CHAN_CHROMA_OUT);
1199 if (ctx->deinterlacing)
1200 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
1204 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
1205 vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
1208 static void dei_error(struct vpe_ctx *ctx)
1210 dev_warn(ctx->dev->v4l2_dev.dev,
1211 "received DEI error interrupt\n");
1214 static void ds1_uv_error(struct vpe_ctx *ctx)
1216 dev_warn(ctx->dev->v4l2_dev.dev,
1217 "received downsampler error interrupt\n");
1220 static irqreturn_t vpe_irq(int irq_vpe, void *data)
1222 struct vpe_dev *dev = (struct vpe_dev *)data;
1223 struct vpe_ctx *ctx;
1224 struct vpe_q_data *d_q_data;
1225 struct vb2_buffer *s_vb, *d_vb;
1226 struct v4l2_buffer *s_buf, *d_buf;
1227 unsigned long flags;
1230 irqst0 = read_reg(dev, VPE_INT0_STATUS0);
1232 write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
1233 vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
1236 irqst1 = read_reg(dev, VPE_INT0_STATUS1);
1238 write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
1239 vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
1242 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
1244 vpe_err(dev, "instance released before end of transaction\n");
1249 if (irqst1 & VPE_DEI_ERROR_INT) {
1250 irqst1 &= ~VPE_DEI_ERROR_INT;
1253 if (irqst1 & VPE_DS1_UV_ERROR_INT) {
1254 irqst1 &= ~VPE_DS1_UV_ERROR_INT;
1260 if (irqst0 & VPE_INT0_LIST0_COMPLETE)
1261 vpdma_clear_list_stat(ctx->dev->vpdma);
1263 irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
1266 if (irqst0 | irqst1) {
1267 dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
1268 "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
1274 vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
1275 vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
1276 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
1277 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
1279 vpdma_reset_desc_list(&ctx->desc_list);
1281 /* the previous dst mv buffer becomes the next src mv buffer */
1282 ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
1287 s_vb = ctx->src_vbs[0];
1289 s_buf = &s_vb->v4l2_buf;
1290 d_buf = &d_vb->v4l2_buf;
1292 d_buf->flags = s_buf->flags;
1294 d_buf->timestamp = s_buf->timestamp;
1295 if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
1296 d_buf->timecode = s_buf->timecode;
1298 d_buf->sequence = ctx->sequence;
1300 d_q_data = &ctx->q_data[Q_DATA_DST];
1301 if (d_q_data->flags & Q_DATA_INTERLACED) {
1302 d_buf->field = ctx->field;
1303 if (ctx->field == V4L2_FIELD_BOTTOM) {
1305 ctx->field = V4L2_FIELD_TOP;
1307 WARN_ON(ctx->field != V4L2_FIELD_TOP);
1308 ctx->field = V4L2_FIELD_BOTTOM;
1311 d_buf->field = V4L2_FIELD_NONE;
1315 if (ctx->deinterlacing)
1316 s_vb = ctx->src_vbs[2];
1318 spin_lock_irqsave(&dev->lock, flags);
1319 v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
1320 v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
1321 spin_unlock_irqrestore(&dev->lock, flags);
1323 if (ctx->deinterlacing) {
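/* shift the field history: the current field becomes f-1, the previous f-1 becomes f-2 */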
1324 ctx->src_vbs[2] = ctx->src_vbs[1];
1325 ctx->src_vbs[1] = ctx->src_vbs[0];
1328 ctx->bufs_completed++;
1329 if (ctx->bufs_completed < ctx->bufs_per_job) {
1335 vpe_dbg(ctx->dev, "finishing transaction\n");
1336 ctx->bufs_completed = 0;
1337 v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
1345 static int vpe_querycap(struct file *file, void *priv,
1346 struct v4l2_capability *cap)
1348 strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
1349 strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
1350 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1352 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
1353 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1357 static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1360 struct vpe_fmt *fmt = NULL;
1363 for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
1364 if (vpe_formats[i].types & type) {
1365 if (index == f->index) {
1366 fmt = &vpe_formats[i];
1376 strncpy(f->description, fmt->name, sizeof(f->description) - 1);
1377 f->pixelformat = fmt->fourcc;
1381 static int vpe_enum_fmt(struct file *file, void *priv,
1382 struct v4l2_fmtdesc *f)
1384 if (V4L2_TYPE_IS_OUTPUT(f->type))
1385 return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
1387 return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
1390 static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1392 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1393 struct vpe_ctx *ctx = file2ctx(file);
1394 struct vb2_queue *vq;
1395 struct vpe_q_data *q_data;
1398 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
1402 q_data = get_q_data(ctx, f->type);
1404 pix->width = q_data->width;
1405 pix->height = q_data->height;
1406 pix->pixelformat = q_data->fmt->fourcc;
1407 pix->field = q_data->field;
1409 if (V4L2_TYPE_IS_OUTPUT(f->type)) {
1410 pix->colorspace = q_data->colorspace;
1412 struct vpe_q_data *s_q_data;
1414 /* get colorspace from the source queue */
1415 s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
1417 pix->colorspace = s_q_data->colorspace;
1420 pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
1422 for (i = 0; i < pix->num_planes; i++) {
1423 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1424 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1430 static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
1431 struct vpe_fmt *fmt, int type)
1433 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1434 struct v4l2_plane_pix_format *plane_fmt;
1435 unsigned int w_align;
1436 int i, depth, depth_bytes;
1438 if (!fmt || !(fmt->types & type)) {
1439 vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
1444 if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
1445 pix->field = V4L2_FIELD_NONE;
1447 depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
* the line stride should be 16 byte aligned for VPDMA to work. Based on
* the bytes per pixel, figure out how much the width should be aligned
* to make sure the line stride is 16 byte aligned
1454 depth_bytes = depth >> 3;
1456 if (depth_bytes == 3)
* if bpp is 3 (as in some RGB formats), aligning the pixel width doesn't
* really help in ensuring the line stride is 16 byte aligned
* for the remaining bpp values (4, 2 and 1), aligning the pixel width
* can ensure a line stride alignment of 16 bytes. For example, if bpp
* is 2, then the line stride is 16 byte aligned when the width is
* aligned to 8 pixels
1469 w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
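/*
 * e.g. assuming VPDMA_DESC_ALIGN is 16 bytes: with 2 bytes per pixel,
 * w_align is order_base_2(8) = 3, so the width gets rounded to a
 * multiple of 8 pixels and the line stride stays 16 byte aligned
 */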
1471 v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
1472 &pix->height, MIN_H, MAX_H, H_ALIGN,
1475 pix->num_planes = fmt->coplanar ? 2 : 1;
1476 pix->pixelformat = fmt->fourcc;
1478 if (!pix->colorspace) {
1479 if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
1480 fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
1481 fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
1482 fmt->fourcc == V4L2_PIX_FMT_BGR32) {
1483 pix->colorspace = V4L2_COLORSPACE_SRGB;
1485 if (pix->height > 1280) /* HD */
1486 pix->colorspace = V4L2_COLORSPACE_REC709;
1488 pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1492 memset(pix->reserved, 0, sizeof(pix->reserved));
1493 for (i = 0; i < pix->num_planes; i++) {
1494 plane_fmt = &pix->plane_fmt[i];
1495 depth = fmt->vpdma_fmt[i]->depth;
1498 plane_fmt->bytesperline = (pix->width * depth) >> 3;
1500 plane_fmt->bytesperline = pix->width;
1502 plane_fmt->sizeimage =
1503 (pix->height * pix->width * depth) >> 3;
1505 memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
1511 static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1513 struct vpe_ctx *ctx = file2ctx(file);
1514 struct vpe_fmt *fmt = find_format(f);
1516 if (V4L2_TYPE_IS_OUTPUT(f->type))
1517 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1519 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1522 static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
1524 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1525 struct v4l2_plane_pix_format *plane_fmt;
1526 struct vpe_q_data *q_data;
1527 struct vb2_queue *vq;
1530 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
1534 if (vb2_is_busy(vq)) {
1535 vpe_err(ctx->dev, "queue busy\n");
1539 q_data = get_q_data(ctx, f->type);
1543 q_data->fmt = find_format(f);
1544 q_data->width = pix->width;
1545 q_data->height = pix->height;
1546 q_data->colorspace = pix->colorspace;
1547 q_data->field = pix->field;
1549 for (i = 0; i < pix->num_planes; i++) {
1550 plane_fmt = &pix->plane_fmt[i];
1552 q_data->bytesperline[i] = plane_fmt->bytesperline;
1553 q_data->sizeimage[i] = plane_fmt->sizeimage;
1556 q_data->c_rect.left = 0;
1557 q_data->c_rect.top = 0;
1558 q_data->c_rect.width = q_data->width;
1559 q_data->c_rect.height = q_data->height;
1561 if (q_data->field == V4L2_FIELD_ALTERNATE)
1562 q_data->flags |= Q_DATA_INTERLACED;
1564 q_data->flags &= ~Q_DATA_INTERLACED;
1566 vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
1567 f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
1568 q_data->bytesperline[VPE_LUMA]);
1569 if (q_data->fmt->coplanar)
1570 vpe_dbg(ctx->dev, " bpl_uv %d\n",
1571 q_data->bytesperline[VPE_CHROMA]);
1576 static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1579 struct vpe_ctx *ctx = file2ctx(file);
1581 ret = vpe_try_fmt(file, priv, f);
1585 ret = __vpe_s_fmt(ctx, f);
1589 if (V4L2_TYPE_IS_OUTPUT(f->type))
1590 set_src_registers(ctx);
1592 set_dst_registers(ctx);
1594 return set_srcdst_params(ctx);
1597 static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
1599 struct vpe_q_data *q_data;
1601 if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1602 (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1605 q_data = get_q_data(ctx, s->type);
1609 switch (s->target) {
1610 case V4L2_SEL_TGT_COMPOSE:
1612 * COMPOSE target is only valid for capture buffer type, return
1613 * error for output buffer type
1615 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1618 case V4L2_SEL_TGT_CROP:
1620 * CROP target is only valid for output buffer type, return
1621 * error for capture buffer type
1623 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
* bounds and default crop/compose targets are not valid targets to set
1634 if (s->r.top < 0 || s->r.left < 0) {
1635 vpe_err(ctx->dev, "negative values for top and left\n");
1636 s->r.top = s->r.left = 0;
1639 v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
1640 &s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
1642 /* adjust left/top if cropping rectangle is out of bounds */
1643 if (s->r.left + s->r.width > q_data->width)
1644 s->r.left = q_data->width - s->r.width;
1645 if (s->r.top + s->r.height > q_data->height)
1646 s->r.top = q_data->height - s->r.height;
1651 static int vpe_g_selection(struct file *file, void *fh,
1652 struct v4l2_selection *s)
1654 struct vpe_ctx *ctx = file2ctx(file);
1655 struct vpe_q_data *q_data;
1656 bool use_c_rect = false;
1658 if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1659 (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1662 q_data = get_q_data(ctx, s->type);
1666 switch (s->target) {
1667 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
1668 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1669 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1672 case V4L2_SEL_TGT_CROP_BOUNDS:
1673 case V4L2_SEL_TGT_CROP_DEFAULT:
1674 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1677 case V4L2_SEL_TGT_COMPOSE:
1678 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1682 case V4L2_SEL_TGT_CROP:
1683 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1693 * for CROP/COMPOSE target type, return c_rect params from the
1694 * respective buffer type
1696 s->r = q_data->c_rect;
1699 * for DEFAULT/BOUNDS target type, return width and height from
1700 * S_FMT of the respective buffer type
1704 s->r.width = q_data->width;
1705 s->r.height = q_data->height;
1712 static int vpe_s_selection(struct file *file, void *fh,
1713 struct v4l2_selection *s)
1715 struct vpe_ctx *ctx = file2ctx(file);
1716 struct vpe_q_data *q_data;
1717 struct v4l2_selection sel = *s;
1720 ret = __vpe_try_selection(ctx, &sel);
1724 q_data = get_q_data(ctx, sel.type);
1728 if ((q_data->c_rect.left == sel.r.left) &&
1729 (q_data->c_rect.top == sel.r.top) &&
1730 (q_data->c_rect.width == sel.r.width) &&
1731 (q_data->c_rect.height == sel.r.height)) {
1733 "requested crop/compose values are already set\n");
1737 q_data->c_rect = sel.r;
1739 return set_srcdst_params(ctx);
1742 static int vpe_reqbufs(struct file *file, void *priv,
1743 struct v4l2_requestbuffers *reqbufs)
1745 struct vpe_ctx *ctx = file2ctx(file);
1747 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
1750 static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1752 struct vpe_ctx *ctx = file2ctx(file);
1754 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
1757 static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1759 struct vpe_ctx *ctx = file2ctx(file);
1761 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
1764 static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
1766 struct vpe_ctx *ctx = file2ctx(file);
1768 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
1771 static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
1773 struct vpe_ctx *ctx = file2ctx(file);
1775 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
1778 static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
1780 struct vpe_ctx *ctx = file2ctx(file);
1782 vpe_dump_regs(ctx->dev);
1783 vpdma_dump_regs(ctx->dev->vpdma);
1785 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
* defines the number of buffers/frames a context can process with VPE before
* switching to a different context. The default value is 1 buffer per context
1792 #define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
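/*
 * A minimal userspace sketch (not part of the driver) for setting this
 * control, assuming the standard VIDIOC_S_CTRL ioctl on an open VPE fd:
 *
 *	struct v4l2_control ctrl = {
 *		.id	= V4L2_CID_VPE_BUFS_PER_JOB,
 *		.value	= 2,
 *	};
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */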
1794 static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1796 struct vpe_ctx *ctx =
1797 container_of(ctrl->handler, struct vpe_ctx, hdl);
1800 case V4L2_CID_VPE_BUFS_PER_JOB:
1801 ctx->bufs_per_job = ctrl->val;
1805 vpe_err(ctx->dev, "Invalid control\n");
1812 static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
1813 .s_ctrl = vpe_s_ctrl,
1816 static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
1817 .vidioc_querycap = vpe_querycap,
1819 .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
1820 .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
1821 .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
1822 .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
1824 .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
1825 .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
1826 .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
1827 .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
1829 .vidioc_g_selection = vpe_g_selection,
1830 .vidioc_s_selection = vpe_s_selection,
1832 .vidioc_reqbufs = vpe_reqbufs,
1833 .vidioc_querybuf = vpe_querybuf,
1835 .vidioc_qbuf = vpe_qbuf,
1836 .vidioc_dqbuf = vpe_dqbuf,
1838 .vidioc_streamon = vpe_streamon,
1839 .vidioc_streamoff = vpe_streamoff,
1840 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1841 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1847 static int vpe_queue_setup(struct vb2_queue *vq,
1848 const struct v4l2_format *fmt,
1849 unsigned int *nbuffers, unsigned int *nplanes,
1850 unsigned int sizes[], void *alloc_ctxs[])
1853 struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
1854 struct vpe_q_data *q_data;
1856 q_data = get_q_data(ctx, vq->type);
1858 *nplanes = q_data->fmt->coplanar ? 2 : 1;
1860 for (i = 0; i < *nplanes; i++) {
1861 sizes[i] = q_data->sizeimage[i];
1862 alloc_ctxs[i] = ctx->dev->alloc_ctx;
1865 vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
1867 if (q_data->fmt->coplanar)
1868 vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
1873 static int vpe_buf_prepare(struct vb2_buffer *vb)
1875 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1876 struct vpe_q_data *q_data;
1879 vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
1881 q_data = get_q_data(ctx, vb->vb2_queue->type);
1882 num_planes = q_data->fmt->coplanar ? 2 : 1;
1884 if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1885 if (!(q_data->flags & Q_DATA_INTERLACED)) {
1886 vb->v4l2_buf.field = V4L2_FIELD_NONE;
1888 if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
1889 vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
1894 for (i = 0; i < num_planes; i++) {
1895 if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1897 "data will not fit into plane (%lu < %lu)\n",
1898 vb2_plane_size(vb, i),
1899 (long) q_data->sizeimage[i]);
1904 for (i = 0; i < num_planes; i++)
1905 vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
1910 static void vpe_buf_queue(struct vb2_buffer *vb)
1912 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1913 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
1916 static void vpe_wait_prepare(struct vb2_queue *q)
1918 struct vpe_ctx *ctx = vb2_get_drv_priv(q);
1922 static void vpe_wait_finish(struct vb2_queue *q)
1924 struct vpe_ctx *ctx = vb2_get_drv_priv(q);
1928 static struct vb2_ops vpe_qops = {
1929 .queue_setup = vpe_queue_setup,
1930 .buf_prepare = vpe_buf_prepare,
1931 .buf_queue = vpe_buf_queue,
1932 .wait_prepare = vpe_wait_prepare,
1933 .wait_finish = vpe_wait_finish,
1936 static int queue_init(void *priv, struct vb2_queue *src_vq,
1937 struct vb2_queue *dst_vq)
1939 struct vpe_ctx *ctx = priv;
1942 memset(src_vq, 0, sizeof(*src_vq));
1943 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1944 src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1945 src_vq->drv_priv = ctx;
1946 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1947 src_vq->ops = &vpe_qops;
1948 src_vq->mem_ops = &vb2_dma_contig_memops;
1949 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1951 ret = vb2_queue_init(src_vq);
1955 memset(dst_vq, 0, sizeof(*dst_vq));
1956 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1957 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1958 dst_vq->drv_priv = ctx;
1959 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1960 dst_vq->ops = &vpe_qops;
1961 dst_vq->mem_ops = &vb2_dma_contig_memops;
1962 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1964 return vb2_queue_init(dst_vq);
1967 static const struct v4l2_ctrl_config vpe_bufs_per_job = {
1968 .ops = &vpe_ctrl_ops,
1969 .id = V4L2_CID_VPE_BUFS_PER_JOB,
1970 .name = "Buffers Per Transaction",
1971 .type = V4L2_CTRL_TYPE_INTEGER,
1972 .def = VPE_DEF_BUFS_PER_JOB,
1974 .max = VIDEO_MAX_FRAME,
1981 static int vpe_open(struct file *file)
1983 struct vpe_dev *dev = video_drvdata(file);
1984 struct vpe_ctx *ctx = NULL;
1985 struct vpe_q_data *s_q_data;
1986 struct v4l2_ctrl_handler *hdl;
1989 vpe_dbg(dev, "vpe_open\n");
1991 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1997 if (mutex_lock_interruptible(&dev->dev_mutex)) {
2002 ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
2003 VPDMA_LIST_TYPE_NORMAL);
2007 ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
2009 goto free_desc_list;
2011 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
2015 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
2021 v4l2_fh_init(&ctx->fh, video_devdata(file));
2022 file->private_data = &ctx->fh;
2025 v4l2_ctrl_handler_init(hdl, 1);
2026 v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
2031 ctx->fh.ctrl_handler = hdl;
2032 v4l2_ctrl_handler_setup(hdl);
2034 s_q_data = &ctx->q_data[Q_DATA_SRC];
2035 s_q_data->fmt = &vpe_formats[2];
2036 s_q_data->width = 1920;
2037 s_q_data->height = 1080;
2038 s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
2039 s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
2040 s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
2042 s_q_data->colorspace = V4L2_COLORSPACE_REC709;
2043 s_q_data->field = V4L2_FIELD_NONE;
2044 s_q_data->c_rect.left = 0;
2045 s_q_data->c_rect.top = 0;
2046 s_q_data->c_rect.width = s_q_data->width;
2047 s_q_data->c_rect.height = s_q_data->height;
2048 s_q_data->flags = 0;
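/* the destination queue data starts out with the same defaults as the source */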
2050 ctx->q_data[Q_DATA_DST] = *s_q_data;
2052 set_dei_shadow_registers(ctx);
2053 set_src_registers(ctx);
2054 set_dst_registers(ctx);
2055 ret = set_srcdst_params(ctx);
2059 ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
2061 if (IS_ERR(ctx->m2m_ctx)) {
2062 ret = PTR_ERR(ctx->m2m_ctx);
2066 v4l2_fh_add(&ctx->fh);
* for now, just report the creation of the first instance; we can later
* optimize the driver to enable or disable clocks when the first
* instance is created or the last instance released
2073 if (atomic_inc_return(&dev->num_instances) == 1)
2074 vpe_dbg(dev, "first instance created\n");
2076 ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
2078 ctx->load_mmrs = true;
2080 vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
2083 mutex_unlock(&dev->dev_mutex);
2087 v4l2_ctrl_handler_free(hdl);
2088 v4l2_fh_exit(&ctx->fh);
2089 vpdma_free_desc_buf(&ctx->sc_coeff_v);
2091 vpdma_free_desc_buf(&ctx->sc_coeff_h);
2093 vpdma_free_desc_buf(&ctx->mmr_adb);
2095 vpdma_free_desc_list(&ctx->desc_list);
2097 mutex_unlock(&dev->dev_mutex);
2103 static int vpe_release(struct file *file)
2105 struct vpe_dev *dev = video_drvdata(file);
2106 struct vpe_ctx *ctx = file2ctx(file);
2108 vpe_dbg(dev, "releasing instance %p\n", ctx);
2110 mutex_lock(&dev->dev_mutex);
2112 free_mv_buffers(ctx);
2113 vpdma_free_desc_list(&ctx->desc_list);
2114 vpdma_free_desc_buf(&ctx->mmr_adb);
2116 v4l2_fh_del(&ctx->fh);
2117 v4l2_fh_exit(&ctx->fh);
2118 v4l2_ctrl_handler_free(&ctx->hdl);
2119 v4l2_m2m_ctx_release(ctx->m2m_ctx);
* for now, just report the release of the last instance; we can later
* optimize the driver to enable or disable clocks when the first
* instance is created or the last instance released
2128 if (atomic_dec_return(&dev->num_instances) == 0)
2129 vpe_dbg(dev, "last instance released\n");
2131 mutex_unlock(&dev->dev_mutex);
2136 static unsigned int vpe_poll(struct file *file,
2137 struct poll_table_struct *wait)
2139 struct vpe_ctx *ctx = file2ctx(file);
2140 struct vpe_dev *dev = ctx->dev;
2143 mutex_lock(&dev->dev_mutex);
2144 ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
2145 mutex_unlock(&dev->dev_mutex);
2149 static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
2151 struct vpe_ctx *ctx = file2ctx(file);
2152 struct vpe_dev *dev = ctx->dev;
2155 if (mutex_lock_interruptible(&dev->dev_mutex))
2156 return -ERESTARTSYS;
2157 ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
2158 mutex_unlock(&dev->dev_mutex);
2162 static const struct v4l2_file_operations vpe_fops = {
2163 .owner = THIS_MODULE,
2165 .release = vpe_release,
2167 .unlocked_ioctl = video_ioctl2,
2171 static struct video_device vpe_videodev = {
2172 .name = VPE_MODULE_NAME,
2174 .ioctl_ops = &vpe_ioctl_ops,
2176 .release = video_device_release_empty,
2177 .vfl_dir = VFL_DIR_M2M,
2180 static struct v4l2_m2m_ops m2m_ops = {
2181 .device_run = device_run,
2182 .job_ready = job_ready,
2183 .job_abort = job_abort,
2185 .unlock = vpe_unlock,
2188 static int vpe_runtime_get(struct platform_device *pdev)
2192 dev_dbg(&pdev->dev, "vpe_runtime_get\n");
2194 r = pm_runtime_get_sync(&pdev->dev);
2196 return r < 0 ? r : 0;
2199 static void vpe_runtime_put(struct platform_device *pdev)
2204 dev_dbg(&pdev->dev, "vpe_runtime_put\n");
2206 r = pm_runtime_put_sync(&pdev->dev);
2207 WARN_ON(r < 0 && r != -ENOSYS);
2210 static void vpe_fw_cb(struct platform_device *pdev)
2212 struct vpe_dev *dev = platform_get_drvdata(pdev);
2213 struct video_device *vfd;
2217 *vfd = vpe_videodev;
2218 vfd->lock = &dev->dev_mutex;
2219 vfd->v4l2_dev = &dev->v4l2_dev;
2221 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
2223 vpe_err(dev, "Failed to register video device\n");
2225 vpe_set_clock_enable(dev, 0);
2226 vpe_runtime_put(pdev);
2227 pm_runtime_disable(&pdev->dev);
2228 v4l2_m2m_release(dev->m2m_dev);
2229 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2230 v4l2_device_unregister(&dev->v4l2_dev);
2235 video_set_drvdata(vfd, dev);
2236 snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
2237 dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
2241 static int vpe_probe(struct platform_device *pdev)
2243 struct vpe_dev *dev;
2246 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2250 spin_lock_init(&dev->lock);
2252 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
2256 atomic_set(&dev->num_instances, 0);
2257 mutex_init(&dev->dev_mutex);
2259 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
* HACK: we get resource info from the device tree in the form of a list of
* VPE sub blocks. The driver currently uses only the base of vpe_top for
* register access; it should be changed later to access registers based on
* the sub block base addresses
2267 dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
2270 goto v4l2_dev_unreg;
2273 irq = platform_get_irq(pdev, 0);
2274 ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
2277 goto v4l2_dev_unreg;
2279 platform_set_drvdata(pdev, dev);
2281 dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
2282 if (IS_ERR(dev->alloc_ctx)) {
2283 vpe_err(dev, "Failed to alloc vb2 context\n");
2284 ret = PTR_ERR(dev->alloc_ctx);
2285 goto v4l2_dev_unreg;
2288 dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
2289 if (IS_ERR(dev->m2m_dev)) {
2290 vpe_err(dev, "Failed to init mem2mem device\n");
2291 ret = PTR_ERR(dev->m2m_dev);
2295 pm_runtime_enable(&pdev->dev);
2297 ret = vpe_runtime_get(pdev);
2301 /* Perform clk enable followed by reset */
2302 vpe_set_clock_enable(dev, 1);
2306 func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
2307 VPE_PID_FUNC_SHIFT);
2308 vpe_dbg(dev, "VPE PID function %x\n", func);
2310 vpe_top_vpdma_reset(dev);
2312 dev->sc = sc_create(pdev);
2313 if (IS_ERR(dev->sc)) {
2314 ret = PTR_ERR(dev->sc);
2318 dev->csc = csc_create(pdev);
2319 if (IS_ERR(dev->csc)) {
2320 ret = PTR_ERR(dev->csc);
2324 dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
2325 if (IS_ERR(dev->vpdma)) {
2326 ret = PTR_ERR(dev->vpdma);
2333 vpe_runtime_put(pdev);
2335 pm_runtime_disable(&pdev->dev);
2336 v4l2_m2m_release(dev->m2m_dev);
2338 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2340 v4l2_device_unregister(&dev->v4l2_dev);
2345 static int vpe_remove(struct platform_device *pdev)
2347 struct vpe_dev *dev = platform_get_drvdata(pdev);
2349 v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);
2351 v4l2_m2m_release(dev->m2m_dev);
2352 video_unregister_device(&dev->vfd);
2353 v4l2_device_unregister(&dev->v4l2_dev);
2354 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2356 vpe_set_clock_enable(dev, 0);
2357 vpe_runtime_put(pdev);
2358 pm_runtime_disable(&pdev->dev);
2363 #if defined(CONFIG_OF)
2364 static const struct of_device_id vpe_of_match[] = {
2366 .compatible = "ti,vpe",
2371 #define vpe_of_match NULL
2374 static struct platform_driver vpe_pdrv = {
2376 .remove = vpe_remove,
2378 .name = VPE_MODULE_NAME,
2379 .of_match_table = vpe_of_match,
2383 module_platform_driver(vpe_pdrv);
2385 MODULE_DESCRIPTION("TI VPE driver");
2386 MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
2387 MODULE_LICENSE("GPL");