/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}
/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
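
/*
 * Example (illustrative sketch, assuming the videobuf2-dma-contig
 * allocator): a device_run() callback typically peeks at the next ready
 * buffers with the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf()
 * helpers and programs the hardware with their DMA addresses. Names
 * prefixed with "my_" are hypothetical, not part of this framework.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *		my_hw_start(ctx->dev,
 *			    vb2_dma_contig_plane_dma_addr(src, 0),
 *			    vb2_dma_contig_plane_dma_addr(dst, 0));
 *	}
 */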
/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
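
/*
 * Example (illustrative sketch, not part of this framework): a driver
 * that needs two source buffers per transaction could supply a
 * job_ready callback along these lines; "struct my_ctx" and
 * "my_job_ready" are hypothetical driver-side names.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *	}
 */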
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
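
/*
 * Example (illustrative sketch): v4l2_m2m_job_finish() is typically
 * called from the driver's interrupt handler, after both buffers have
 * been marked done. "my_irq", "struct my_dev" and "struct my_ctx" are
 * hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */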
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
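
/*
 * Example (illustrative sketch): the multiplexers above are meant to be
 * called from thin v4l2_ioctl_ops wrappers; "my_qbuf" is a hypothetical
 * wrapper, assuming the driver keeps its context in the fh private data.
 *
 *	static int my_qbuf(struct file *file, void *priv,
 *			   struct v4l2_buffer *buf)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
 *	}
 */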
/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	INIT_LIST_HEAD(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
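
/*
 * Example (illustrative sketch): a driver's poll() file operation can
 * simply forward here; "my_poll" is a hypothetical name, assuming the
 * driver stores its context in file->private_data.
 *
 *	static unsigned int my_poll(struct file *file,
 *				    struct poll_table_struct *wait)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 */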
/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
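
/*
 * Worked example: with DST_QUEUE_OFF_BASE = 1 << 30 (0x40000000), a
 * destination buffer that vb2 places at per-queue offset 0x1000 is
 * reported to the application at 0x40001000 by v4l2_m2m_querybuf();
 * when that offset comes back through mmap(), the branch above detects
 * it as a destination buffer and subtracts the base again before
 * handing it to vb2_mmap().
 */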
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
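
/*
 * Example (illustrative sketch): typical probe()-time setup; the "my_*"
 * names are hypothetical driver callbacks, not part of this framework.
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */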
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function
 * to be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
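
/*
 * Example (illustrative sketch): open()-time context creation;
 * "my_queue_init" is a hypothetical callback that fills in both
 * struct vb2_queue fields and calls vb2_queue_init() on each.
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */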
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the driver's buf_queue() callback in vb2_ops.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
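
/*
 * Example (illustrative sketch): a driver's vb2_ops buf_queue callback
 * usually just hands the buffer over; "my_buf_queue" is a hypothetical
 * name.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */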