FFmpeg
v4l2_context.c
/*
 * V4L2 context helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_buffers.h"
#include "v4l2_fmt.h"
#include "v4l2_m2m.h"

struct v4l2_format_update {
    uint32_t v4l2_fmt;
    int update_v4l2;

    enum AVPixelFormat av_fmt;
    int update_avfmt;
};

static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
{
    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
        container_of(ctx, V4L2m2mContext, output) :
        container_of(ctx, V4L2m2mContext, capture);
}

static inline AVClass *logger(V4L2Context *ctx)
{
    return ctx_to_m2mctx(ctx)->priv;
}

static inline unsigned int v4l2_get_width(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
}

static inline unsigned int v4l2_get_height(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
}

static AVRational v4l2_get_sar(V4L2Context *ctx)
{
    struct AVRational sar = { 0, 1 };
    struct v4l2_cropcap cropcap;
    int ret;

    memset(&cropcap, 0, sizeof(cropcap));
    cropcap.type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_CROPCAP, &cropcap);
    if (ret)
        return sar;

    sar.num = cropcap.pixelaspect.numerator;
    sar.den = cropcap.pixelaspect.denominator;
    return sar;
}

static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
{
    struct v4l2_format *fmt1 = &ctx->format;
    int ret = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
        fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
        fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
        :
        fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
        fmt1->fmt.pix.height != fmt2->fmt.pix.height;

    if (ret)
        av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
               ctx->name,
               v4l2_get_width(fmt1), v4l2_get_height(fmt1),
               v4l2_get_width(fmt2), v4l2_get_height(fmt2));

    return ret;
}

static int v4l2_type_supported(V4L2Context *ctx)
{
    return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
}

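/*
 * Best-effort estimate of the buffer size needed for a compressed frame:
 * a small heuristic for decoders, a 4K-aligned 4:2:0-based estimate for
 * encoders (some drivers, e.g. s5p-mfc, require the user to provide one).
 */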
static int v4l2_get_framesize_compressed(V4L2Context *ctx, int width, int height)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    const int SZ_4K = 0x1000;
    int size;

    if (s->avctx && av_codec_is_decoder(s->avctx->codec))
        return ((width * height * 3 / 2) / 2) + 128;

    /* encoder */
    size = FFALIGN(height, 32) * FFALIGN(width, 32) * 3 / 2 / 2;
    return FFALIGN(size, SZ_4K);
}

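/*
 * Copies the negotiated dimensions and, optionally, the pixel format into
 * ctx->format so that a later VIDIOC_TRY_FMT/VIDIOC_S_FMT sees the update.
 */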
static void v4l2_save_to_context(V4L2Context *ctx, struct v4l2_format_update *fmt)
{
    ctx->format.type = ctx->type;

    if (fmt->update_avfmt)
        ctx->av_pix_fmt = fmt->av_fmt;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        /* update the sizes to handle the reconfiguration of the capture stream at runtime */
        ctx->format.fmt.pix_mp.height = ctx->height;
        ctx->format.fmt.pix_mp.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix_mp.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    } else {
        ctx->format.fmt.pix.height = ctx->height;
        ctx->format.fmt.pix.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix.sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    }
}

/**
 * returns 1 if reinit was successful, negative if it failed
 * returns 0 if reinit was not executed
 */
static int v4l2_handle_event(V4L2Context *ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_format cap_fmt = s->capture.format;
    struct v4l2_format out_fmt = s->output.format;
    struct v4l2_event evt = { 0 };
    int full_reinit, reinit, ret;

    ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
        return 0;
    }

    if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
        return 0;

    ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
        return 0;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
        return 0;
    }

    full_reinit = v4l2_resolution_changed(&s->output, &out_fmt);
    if (full_reinit) {
        s->output.height = v4l2_get_height(&out_fmt);
        s->output.width = v4l2_get_width(&out_fmt);
        s->output.sample_aspect_ratio = v4l2_get_sar(&s->output);
    }

    reinit = v4l2_resolution_changed(&s->capture, &cap_fmt);
    if (reinit) {
        s->capture.height = v4l2_get_height(&cap_fmt);
        s->capture.width = v4l2_get_width(&cap_fmt);
        s->capture.sample_aspect_ratio = v4l2_get_sar(&s->capture);
    }

    if (full_reinit || reinit)
        s->reinit = 1;

    if (full_reinit) {
        ret = ff_v4l2_m2m_codec_full_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n");
            return -EINVAL;
        }
        goto reinit_run;
    }

    if (reinit) {
        if (s->avctx)
            ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
        if (ret < 0)
            av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n");

        ret = ff_v4l2_m2m_codec_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n");
            return -EINVAL;
        }
        goto reinit_run;
    }

    /* dummy event received */
    return 0;

    /* reinit executed */
reinit_run:
    return 1;
}

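/*
 * Asks the driver to stop decoding (V4L2_DEC_CMD_STOP) so the remaining
 * capture buffers can be drained; falls back to VIDIOC_STREAMOFF when the
 * decoder command is not implemented by the driver.
 */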
static int v4l2_stop_decode(V4L2Context *ctx)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd = V4L2_DEC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
    if (ret) {
        /* DECODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

static int v4l2_stop_encode(V4L2Context *ctx)
{
    struct v4l2_encoder_cmd cmd = {
        .cmd = V4L2_ENC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENCODER_CMD, &cmd);
    if (ret) {
        /* ENCODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

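/*
 * Polls the m2m file descriptor and dequeues one buffer from the driver.
 * timeout follows poll() semantics: 0 returns immediately, -1 blocks until
 * a buffer (or a source-change event) is available.
 */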
static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer *avbuf = NULL;
    struct pollfd pfd = {
        .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            /* capture buffer initialization happens during decode hence
             * detection happens at runtime
             */
            if (!ctx->buffers)
                break;

            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        ctx->done = 1;
        return NULL;
    }

start:
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events = POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events = POLLIN | POLLRDNORM | POLLPRI;
    }

    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet
           no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {

        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                           ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }
        return avbuf;
    }

    return NULL;
}

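/* Returns the first buffer not currently owned by the driver, reclaiming
 * completed output buffers first; NULL if none is available. */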
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
{
    int timeout = 0; /* return when no more buffers to dequeue */
    int i;

    /* get back as many output buffers as possible */
    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
        do {
        } while (v4l2_dequeue_v4l2buf(ctx, timeout));
    }

    for (i = 0; i < ctx->num_buffers; i++) {
        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
            return &ctx->buffers[i];
    }

    return NULL;
}

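/* Unmaps all mmapped planes and releases the driver-side buffers
 * (VIDIOC_REQBUFS with count = 0). */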
static int v4l2_release_buffers(V4L2Context *ctx)
{
    struct v4l2_requestbuffers req = {
        .memory = V4L2_MEMORY_MMAP,
        .type = ctx->type,
        .count = 0, /* 0 -> unmaps buffers from the driver */
    };
    int i, j;

    for (i = 0; i < ctx->num_buffers; i++) {
        V4L2Buffer *buffer = &ctx->buffers[i];

        for (j = 0; j < buffer->num_planes; j++) {
            struct V4L2Plane_info *p = &buffer->plane_info[j];
            if (p->mm_addr && p->length)
                if (munmap(p->mm_addr, p->length) < 0)
                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s)\n", ctx->name, av_err2str(AVERROR(errno)));
        }
    }

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
}

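/* Checks with VIDIOC_TRY_FMT whether the driver accepts the given raw
 * AVPixelFormat on this context. */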
static int v4l2_try_raw_format(V4L2Context *ctx, enum AVPixelFormat pixfmt)
{
    struct v4l2_format *fmt = &ctx->format;
    uint32_t v4l2_fmt;
    int ret;

    v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
        fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
    else
        fmt->fmt.pix.pixelformat = v4l2_fmt;

    fmt->type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, fmt);
    if (ret)
        return AVERROR(EINVAL);

    return 0;
}

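/*
 * Picks a raw pixel format for the context: the preferred ctx->av_pix_fmt
 * if the driver accepts it, otherwise the first enumerated format that
 * both sides support.
 */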
static int v4l2_get_raw_format(V4L2Context *ctx, enum AVPixelFormat *p)
{
    enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
    struct v4l2_fmtdesc fdesc;
    int ret;

    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    if (pixfmt != AV_PIX_FMT_NONE) {
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (!ret)
            return 0;
    }

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (ret) {
            fdesc.index++;
            continue;
        }

        *p = pixfmt;

        return 0;
    }

    return AVERROR(EINVAL);
}

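/* Maps ctx->av_codec_id to a v4l2 coded pixelformat and verifies that the
 * driver actually enumerates it. */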
static int v4l2_get_coded_format(V4L2Context *ctx, uint32_t *p)
{
    struct v4l2_fmtdesc fdesc;
    uint32_t v4l2_fmt;
    int ret;

    /* translate to a valid v4l2 format */
    v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    /* check if the driver supports this format */
    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        if (fdesc.pixelformat == v4l2_fmt)
            break;

        fdesc.index++;
    }

    *p = v4l2_fmt;

    return 0;
}

/*****************************************************************************
 *
 * V4L2 Context Interface
 *
 *****************************************************************************/

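/*
 * Typical usage by the V4L2 m2m wrappers (a sketch, not enforced here):
 * ff_v4l2_context_init() requests and maps the driver buffers,
 * ff_v4l2_context_set_status() starts/stops streaming, the enqueue/dequeue
 * helpers move AVPackets/AVFrames in and out, and ff_v4l2_context_release()
 * unmaps everything again.
 */

/* Sets the status of a V4L2Context (VIDIOC_STREAMON/VIDIOC_STREAMOFF). */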
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
{
    int type = ctx->type;
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type);
    if (ret < 0)
        return AVERROR(errno);

    ctx->streamon = (cmd == VIDIOC_STREAMON);

    return 0;
}

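/* Enqueues a buffer to a V4L2Context from an AVFrame; a NULL frame starts
 * draining (encoder stop command). */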
int ff_v4l2_context_enqueue_frame(V4L2Context *ctx, const AVFrame *frame)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer *avbuf;
    int ret;

    if (!frame) {
        ret = v4l2_stop_encode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(ENOMEM);

    ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

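/* Enqueues a buffer to a V4L2Context from an AVPacket; an empty packet
 * starts draining (decoder stop command). */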
int ff_v4l2_context_enqueue_packet(V4L2Context *ctx, const AVPacket *pkt)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer *avbuf;
    int ret;

    if (!pkt->size) {
        ret = v4l2_stop_decode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

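/* Dequeues a buffer from a V4L2Context to an AVFrame. */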
int ff_v4l2_context_dequeue_frame(V4L2Context *ctx, AVFrame *frame, int timeout)
{
    V4L2Buffer *avbuf = NULL;

    /*
     * timeout=-1 blocks until:
     *  1. decoded frame available
     *  2. an input buffer is ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, timeout);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
}

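/* Dequeues a buffer from a V4L2Context to an AVPacket. */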
int ff_v4l2_context_dequeue_packet(V4L2Context *ctx, AVPacket *pkt)
{
    V4L2Buffer *avbuf = NULL;

    /*
     * blocks until:
     *  1. encoded packet available
     *  2. an input buffer ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
}

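/* Queries the driver for a valid v4l2 format and copies it to the context. */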
int ff_v4l2_context_get_format(V4L2Context *ctx, int probe)
{
    struct v4l2_format_update fmt = { 0 };
    int ret;

    if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
        ret = v4l2_get_raw_format(ctx, &fmt.av_fmt);
        if (ret)
            return ret;

        fmt.update_avfmt = !probe;
        v4l2_save_to_context(ctx, &fmt);

        /* format has been tried already */
        return ret;
    }

    ret = v4l2_get_coded_format(ctx, &fmt.v4l2_fmt);
    if (ret)
        return ret;

    fmt.update_v4l2 = 1;
    v4l2_save_to_context(ctx, &fmt);

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, &ctx->format);
}

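/* Sets the V4L2Context format in the v4l2 driver. */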
int ff_v4l2_context_set_format(V4L2Context *ctx)
{
    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
}

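/* Releases a V4L2Context: unmaps and frees its buffers. */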
void ff_v4l2_context_release(V4L2Context *ctx)
{
    int ret;

    if (!ctx->buffers)
        return;

    ret = v4l2_release_buffers(ctx);
    if (ret)
        av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);

    av_free(ctx->buffers);
    ctx->buffers = NULL;
}

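/* Initializes a V4L2Context: requests and maps the driver buffers. */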
int ff_v4l2_context_init(V4L2Context *ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_requestbuffers req;
    int ret, i;

    if (!v4l2_type_supported(ctx)) {
        av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
        return AVERROR_PATCHWELCOME;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
    if (ret)
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);

    memset(&req, 0, sizeof(req));
    req.count = ctx->num_buffers;
    req.memory = V4L2_MEMORY_MMAP;
    req.type = ctx->type;
    ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
        return AVERROR(errno);
    }

    ctx->num_buffers = req.count;
    ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
    if (!ctx->buffers) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        ctx->buffers[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
            goto error;
        }
    }

    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
        req.count,
        v4l2_get_width(&ctx->format),
        v4l2_get_height(&ctx->format),
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);

    return 0;

error:
    v4l2_release_buffers(ctx);

    av_free(ctx->buffers);
    ctx->buffers = NULL;

    return ret;
}