// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
16 #include <media/media-device.h>
17 #include <media/videobuf2-v4l2.h>
18 #include <media/v4l2-mem2mem.h>
19 #include <media/v4l2-dev.h>
20 #include <media/v4l2-device.h>
21 #include <media/v4l2-fh.h>
22 #include <media/v4l2-event.h>
24 MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
25 MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
26 MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)
38 /* Instance is already queued on the job_queue */
39 #define TRANS_QUEUED (1 << 0)
40 /* Instance is currently running in hardware */
41 #define TRANS_RUNNING (1 << 1)
42 /* Instance is currently aborting */
43 #define TRANS_ABORT (1 << 2)
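
/*
 * Lifecycle of the flags above, as used in this file: TRANS_QUEUED is set by
 * __v4l2_m2m_try_queue() when a context is added to job_queue, TRANS_RUNNING
 * is set by v4l2_m2m_try_run() once the job is handed to .device_run(), both
 * are cleared by _v4l2_m2m_job_finish() (and on cancel/streamoff), and
 * TRANS_ABORT is set by v4l2_m2m_cancel_job() so an aborting context is not
 * rescheduled.
 */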
46 /* Offset base for buffers on the destination queue - used to distinguish
47 * between source and destination buffers when mmapping - they receive the same
48 * offsets but for different queues */
49 #define DST_QUEUE_OFF_BASE (1 << 30)
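
/*
 * In practice this means v4l2_m2m_querybuf() adds DST_QUEUE_OFF_BASE to the
 * offsets reported for CAPTURE buffers, and v4l2_m2m_mmap() inspects
 * vma->vm_pgoff to decide which queue an mmap() request belongs to,
 * subtracting the base again before handing the capture queue to vb2_mmap().
 */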
enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};
/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
/*
 * Scheduling handlers
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/*
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
/*
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
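
/*
 * Within this file v4l2_m2m_try_schedule() is called from the qbuf, streamon
 * and request-queue paths; drivers whose .job_ready() conditions can change
 * for other reasons (e.g. a bitstream header becoming available) may also
 * call it directly to re-evaluate whether a job can now be queued.
 */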
/*
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}
/*
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
407 if (m2m_ctx->job_flags & TRANS_RUNNING) {
408 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
409 if (m2m_dev->m2m_ops->job_abort)
410 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
411 dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
412 wait_event(m2m_ctx->finished,
413 !(m2m_ctx->job_flags & TRANS_RUNNING));
414 } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
415 list_del(&m2m_ctx->queue);
416 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
417 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}
/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}
/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
454 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}
459 list_del(&m2m_dev->curr_ctx->queue);
460 m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
461 wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	bool schedule_next;
	unsigned long flags;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
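
/*
 * A minimal sketch of the resulting driver flow (illustrative only; the
 * my_*() names and structures are hypothetical and not part of this file):
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		my_hw_start(ctx, src, dst);	// kick the hardware
 *	}
 *
 *	static irqreturn_t my_irq(int irq, void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *
 *		v4l2_m2m_buf_done(v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx),
 *				  VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx),
 *				  VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 *
 * i.e. .device_run() starts the transaction and, once the hardware signals
 * completion, the driver marks both buffers done and calls
 * v4l2_m2m_job_finish() so the next queued job can be scheduled.
 */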
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	v4l2_m2m_buf_done(src_buf, state);
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
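
/*
 * Compared to v4l2_m2m_job_finish() above, this variant also completes the
 * source buffer and only returns the capture buffer to userspace when the
 * V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF flag is not set, which is what drivers
 * supporting "hold capture buffer" semantics need.
 */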
517 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
518 struct v4l2_requestbuffers *reqbufs)
520 struct vb2_queue *vq;
523 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
524 ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and
	   is no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
581 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
582 struct v4l2_buffer *buf)
584 struct vb2_queue *vq;
586 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
587 return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
589 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
591 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
592 struct v4l2_buffer *buf)
594 struct video_device *vdev = video_devdata(file);
595 struct vb2_queue *vq;
597 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
598 return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
600 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
602 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
603 struct v4l2_create_buffers *create)
605 struct vb2_queue *vq;
607 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
608 return vb2_create_bufs(vq, create);
610 EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
612 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
613 struct v4l2_exportbuffer *eb)
615 struct vb2_queue *vq;
617 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
618 return vb2_expbuf(vq, eb);
620 EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
622 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
623 enum v4l2_buf_type type)
625 struct vb2_queue *vq;
628 vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
635 EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
637 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
638 enum v4l2_buf_type type)
640 struct v4l2_m2m_dev *m2m_dev;
641 struct v4l2_m2m_queue_ctx *q_ctx;
642 unsigned long flags_job, flags;
645 /* wait until the current context is dequeued from job_queue */
646 v4l2_m2m_cancel_job(m2m_ctx);
648 q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;
653 m2m_dev = m2m_ctx->m2m_dev;
654 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
655 /* We should not be scheduled anymore, since we're dropping a queue. */
656 if (m2m_ctx->job_flags & TRANS_QUEUED)
657 list_del(&m2m_ctx->queue);
658 m2m_ctx->job_flags = 0;
660 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
661 /* Drop queue, since streamoff returns device to the same state as after
662 * calling reqbufs. */
663 INIT_LIST_HEAD(&q_ctx->rdy_queue);
665 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
667 if (m2m_dev->curr_ctx == m2m_ctx) {
668 m2m_dev->curr_ctx = NULL;
669 wake_up(&m2m_ctx->finished);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
677 static __poll_t v4l2_m2m_poll_for_data(struct file *file,
678 struct v4l2_m2m_ctx *m2m_ctx,
679 struct poll_table_struct *wait)
681 struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;
686 src_q = v4l2_m2m_get_src_vq(m2m_ctx);
687 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
689 poll_wait(file, &src_q->done_wq, wait);
690 poll_wait(file, &dst_q->done_wq, wait);
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list)))
		return EPOLLERR;
703 spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
716 spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
720 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
721 || src_vb->state == VB2_BUF_STATE_ERROR))
722 rc |= EPOLLOUT | EPOLLWRNORM;
723 spin_unlock_irqrestore(&src_q->done_lock, flags);
725 spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
729 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
730 || dst_vb->state == VB2_BUF_STATE_ERROR))
731 rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}
737 __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
738 struct poll_table_struct *wait)
740 struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
745 rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
747 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
748 struct v4l2_fh *fh = file->private_data;
750 poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
757 EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
759 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
760 struct vm_area_struct *vma)
762 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
763 struct vb2_queue *vq;
	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
774 EXPORT_SYMBOL(v4l2_m2m_mmap);
776 #if defined(CONFIG_MEDIA_CONTROLLER)
777 void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
779 media_remove_intf_links(&m2m_dev->intf_devnode->intf);
780 media_devnode_remove(m2m_dev->intf_devnode);
782 media_entity_remove_links(m2m_dev->source);
783 media_entity_remove_links(&m2m_dev->sink);
784 media_entity_remove_links(&m2m_dev->proc);
785 media_device_unregister_entity(m2m_dev->source);
786 media_device_unregister_entity(&m2m_dev->sink);
787 media_device_unregister_entity(&m2m_dev->proc);
788 kfree(m2m_dev->source->name);
789 kfree(m2m_dev->sink.name);
790 kfree(m2m_dev->proc.name);
792 EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
794 static int v4l2_m2m_register_entity(struct media_device *mdev,
795 struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
796 struct video_device *vdev, int function)
798 struct media_entity *entity;
799 struct media_pad *pads;
806 case MEM2MEM_ENT_TYPE_SOURCE:
807 entity = m2m_dev->source;
808 pads = &m2m_dev->source_pad;
809 pads[0].flags = MEDIA_PAD_FL_SOURCE;
812 case MEM2MEM_ENT_TYPE_SINK:
813 entity = &m2m_dev->sink;
814 pads = &m2m_dev->sink_pad;
815 pads[0].flags = MEDIA_PAD_FL_SINK;
818 case MEM2MEM_ENT_TYPE_PROC:
819 entity = &m2m_dev->proc;
820 pads = m2m_dev->proc_pads;
821 pads[0].flags = MEDIA_PAD_FL_SINK;
822 pads[1].flags = MEDIA_PAD_FL_SOURCE;
829 entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
830 if (type != MEM2MEM_ENT_TYPE_PROC) {
831 entity->info.dev.major = VIDEO_MAJOR;
832 entity->info.dev.minor = vdev->minor;
834 len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
835 name = kmalloc(len, GFP_KERNEL);
838 snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
840 entity->function = function;
842 ret = media_entity_pads_init(entity, num_pads, pads);
845 ret = media_device_register_entity(mdev, entity);
852 int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
853 struct video_device *vdev, int function)
855 struct media_device *mdev = vdev->v4l2_dev->mdev;
856 struct media_link *link;
	/*
	 * A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity. The DMA engine entities are linked
	 * to a V4L interface.
	 */
867 /* Create the three entities with their pads */
868 m2m_dev->source = &vdev->entity;
869 ret = v4l2_m2m_register_entity(mdev, m2m_dev,
870 MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
873 ret = v4l2_m2m_register_entity(mdev, m2m_dev,
874 MEM2MEM_ENT_TYPE_PROC, vdev, function);
876 goto err_rel_entity0;
877 ret = v4l2_m2m_register_entity(mdev, m2m_dev,
878 MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
880 goto err_rel_entity1;
882 /* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
884 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
886 goto err_rel_entity2;
	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
889 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
893 /* Create video interface */
894 m2m_dev->intf_devnode = media_devnode_create(mdev,
895 MEDIA_INTF_T_V4L_VIDEO, 0,
896 VIDEO_MAJOR, vdev->minor);
897 if (!m2m_dev->intf_devnode) {
902 /* Connect the two DMA engines to the interface */
903 link = media_create_intf_link(m2m_dev->source,
904 &m2m_dev->intf_devnode->intf,
905 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
911 link = media_create_intf_link(&m2m_dev->sink,
912 &m2m_dev->intf_devnode->intf,
913 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
916 goto err_rm_intf_link;
921 media_remove_intf_links(&m2m_dev->intf_devnode->intf);
923 media_devnode_remove(m2m_dev->intf_devnode);
925 media_entity_remove_links(&m2m_dev->sink);
927 media_entity_remove_links(&m2m_dev->proc);
928 media_entity_remove_links(m2m_dev->source);
930 media_device_unregister_entity(&m2m_dev->proc);
931 kfree(m2m_dev->proc.name);
933 media_device_unregister_entity(&m2m_dev->sink);
934 kfree(m2m_dev->sink.name);
936 media_device_unregister_entity(m2m_dev->source);
937 kfree(m2m_dev->source->name);
941 EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
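
/*
 * Resulting topology (both I/O entities are also linked to the V4L
 * interface node created above; "<vdev>" stands for the video_device name):
 *
 *	"<vdev>-source" (MEDIA_ENT_F_IO_V4L)
 *	        |
 *	        v
 *	"<vdev>-proc"   (function passed in by the driver)
 *	        |
 *	        v
 *	"<vdev>-sink"   (MEDIA_ENT_F_IO_V4L)
 */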
944 struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
946 struct v4l2_m2m_dev *m2m_dev;
948 if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
949 return ERR_PTR(-EINVAL);
	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
975 struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;
	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);
983 m2m_ctx->priv = drv_priv;
984 m2m_ctx->m2m_dev = m2m_dev;
985 init_waitqueue_head(&m2m_ctx->finished);
987 out_q_ctx = &m2m_ctx->out_q_ctx;
988 cap_q_ctx = &m2m_ctx->cap_q_ctx;
990 INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
991 INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
992 spin_lock_init(&out_q_ctx->rdy_spinlock);
993 spin_lock_init(&cap_q_ctx->rdy_spinlock);
995 INIT_LIST_HEAD(&m2m_ctx->queue);
	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
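
/*
 * A minimal sketch of a queue_init callback as expected by
 * v4l2_m2m_ctx_init() (illustrative only; my_queue_init(), my_ctx and the
 * vb2 ops/mem_ops chosen here are hypothetical driver-side names):
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &my_vb2_ops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->mutex;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 *		// ... same fields and, crucially, the same ->lock ...
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 * Both queues must point at the same ->lock mutex, as checked by the
 * WARN_ON() above.
 */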
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
1030 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
1031 struct vb2_v4l2_buffer *vbuf)
1033 struct v4l2_m2m_buffer *b = container_of(vbuf,
1034 struct v4l2_m2m_buffer, vb);
1035 struct v4l2_m2m_queue_ctx *q_ctx;
1036 unsigned long flags;
1038 q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
1042 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
1043 list_add_tail(&b->list, &q_ctx->rdy_queue);
1045 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
1047 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
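
/*
 * Drivers normally call v4l2_m2m_buf_queue() from their vb2 .buf_queue
 * operation, so that the buffer ends up on the per-queue rdy_queue list
 * that the scheduling code above consumes via v4l2_m2m_next_buf() and
 * v4l2_m2m_buf_remove().
 */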
1049 void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
1050 struct vb2_v4l2_buffer *cap_vb,
1051 bool copy_frame_flags)
1053 u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1055 if (copy_frame_flags)
1056 mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
1057 V4L2_BUF_FLAG_BFRAME;
1059 cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
1061 if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1062 cap_vb->timecode = out_vb->timecode;
1063 cap_vb->field = out_vb->field;
1064 cap_vb->flags &= ~mask;
1065 cap_vb->flags |= out_vb->flags & mask;
1066 cap_vb->vb2_buf.copied_timestamp = 1;
1068 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
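
/*
 * This is typically called by a driver just before it marks a pair of
 * buffers done, so the CAPTURE buffer inherits the timestamp, timecode,
 * field and (optionally) key/P/B-frame flags from the OUTPUT buffer, and
 * copied_timestamp is set for the held-capture-buffer logic above.
 */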
1070 void v4l2_m2m_request_queue(struct media_request *req)
1072 struct media_request_object *obj, *obj_safe;
1073 struct v4l2_m2m_ctx *m2m_ctx = NULL;
	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is the last object in the list.
	 */
1083 list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
1084 struct v4l2_m2m_ctx *m2m_ctx_obj;
1085 struct vb2_buffer *vb;
		if (!obj->ops->queue)
			continue;
1090 if (vb2_request_object_is_buffer(obj)) {
1092 vb = container_of(obj, struct vb2_buffer, req_obj);
1093 WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
1094 m2m_ctx_obj = container_of(vb->vb2_queue,
1095 struct v4l2_m2m_ctx,
1097 WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
1098 m2m_ctx = m2m_ctx_obj;
		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
1114 EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
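
/*
 * Drivers using requests are expected to hook this up as the .req_queue
 * operation of their media_device ops (typically together with
 * vb2_request_validate() as .req_validate), so that queueing a request
 * queues its buffer objects and then tries to schedule the owning context.
 */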
1116 /* Videobuf2 ioctl helpers */
1118 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
1119 struct v4l2_requestbuffers *rb)
1121 struct v4l2_fh *fh = file->private_data;
1123 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
1125 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
1127 int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
1128 struct v4l2_create_buffers *create)
1130 struct v4l2_fh *fh = file->private_data;
1132 return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
1134 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
1136 int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
1137 struct v4l2_buffer *buf)
1139 struct v4l2_fh *fh = file->private_data;
1141 return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
1143 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
1145 int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
1146 struct v4l2_buffer *buf)
1148 struct v4l2_fh *fh = file->private_data;
1150 return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
1152 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
1154 int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
1155 struct v4l2_buffer *buf)
1157 struct v4l2_fh *fh = file->private_data;
1159 return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
1161 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
1163 int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
1164 struct v4l2_buffer *buf)
1166 struct v4l2_fh *fh = file->private_data;
1168 return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
1170 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
1172 int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
1173 struct v4l2_exportbuffer *eb)
1175 struct v4l2_fh *fh = file->private_data;
1177 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
1179 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
1181 int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
1182 enum v4l2_buf_type type)
1184 struct v4l2_fh *fh = file->private_data;
1186 return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
1188 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
1190 int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
1191 enum v4l2_buf_type type)
1193 struct v4l2_fh *fh = file->private_data;
1195 return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
1197 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
1199 int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
1200 struct v4l2_encoder_cmd *ec)
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
1210 int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
1211 struct v4l2_decoder_cmd *dc)
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
1228 int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
1229 struct v4l2_decoder_cmd *dc)
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;
1238 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
1240 int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
1241 struct v4l2_decoder_cmd *dc)
1243 struct v4l2_fh *fh = file->private_data;
1244 struct vb2_v4l2_buffer *out_vb, *cap_vb;
1245 struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
1246 unsigned long flags;
	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;
1253 spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
1254 out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
1255 cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
1272 v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
1273 v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
1279 EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */
1286 int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
1288 struct v4l2_fh *fh = file->private_data;
1290 return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
1292 EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
1294 __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
1296 struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;
1300 if (m2m_ctx->q_lock)
1301 mutex_lock(m2m_ctx->q_lock);
1303 ret = v4l2_m2m_poll(file, m2m_ctx, wait);
1305 if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
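
/*
 * Together with v4l2_m2m_fop_mmap() above, this poll helper lets a driver
 * point its v4l2_file_operations .mmap and .poll directly at these wrappers,
 * provided both vb2 queues share the same lock, as noted in the comment
 * introducing this block of helpers.
 */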