v4l2_context.c
/*
 * V4L2 context helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_buffers.h"
#include "v4l2_fmt.h"
#include "v4l2_m2m.h"

struct v4l2_format_update {
    uint32_t v4l2_fmt;
    int update_v4l2;

    enum AVPixelFormat av_fmt;
    int update_avfmt;
};

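/*
 * Each V4L2Context lives embedded in a V4L2m2mContext (as its "output" or
 * "capture" member), so the helper below can recover the owning
 * V4L2m2mContext from a plain context pointer via container_of() without
 * storing a back-pointer.
 */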
static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
{
    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
        container_of(ctx, V4L2m2mContext, output) :
        container_of(ctx, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Context *ctx)
{
    return ctx_to_m2mctx(ctx)->avctx;
}

static inline unsigned int v4l2_get_width(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
}

static inline unsigned int v4l2_get_height(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
}

static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
{
    struct v4l2_format *fmt1 = &ctx->format;
    int ret = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
        fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
        fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
        :
        fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
        fmt1->fmt.pix.height != fmt2->fmt.pix.height;

    if (ret)
        av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
            ctx->name,
            v4l2_get_width(fmt1), v4l2_get_height(fmt1),
            v4l2_get_width(fmt2), v4l2_get_height(fmt2));

    return ret;
}

static inline int v4l2_type_supported(V4L2Context *ctx)
{
    return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
}

static int v4l2_get_framesize_compressed(V4L2Context* ctx, int width, int height)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    const int SZ_4K = 0x1000;
    int size;

    if (av_codec_is_decoder(s->avctx->codec))
        return ((width * height * 3 / 2) / 2) + 128;

    /* encoder */
    size = FFALIGN(height, 32) * FFALIGN(width, 32) * 3 / 2 / 2;
    return FFALIGN(size, SZ_4K);
}

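/*
 * Worked example of the sizing heuristic above (illustrative only): for a
 * 1920x1080 stream the decoder path returns ((1920 * 1080 * 3 / 2) / 2) + 128
 * = 1555328 bytes, while the encoder path aligns both dimensions to 32
 * (1920x1088), computes 1920 * 1088 * 3 / 2 / 2 = 1566720 and rounds up to
 * the next 4 KiB boundary, giving 1568768 bytes.
 */
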
static void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_update *fmt)
{
    ctx->format.type = ctx->type;

    if (fmt->update_avfmt)
        ctx->av_pix_fmt = fmt->av_fmt;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        /* update the sizes to handle the reconfiguration of the capture stream at runtime */
        ctx->format.fmt.pix_mp.height = ctx->height;
        ctx->format.fmt.pix_mp.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix_mp.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    } else {
        ctx->format.fmt.pix.height = ctx->height;
        ctx->format.fmt.pix.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix.sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    }
}

/**
 * Returns 1 if reinit was successful, a negative value if it failed,
 * and 0 if reinit was not executed.
 */
static int v4l2_handle_event(V4L2Context *ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_format cap_fmt = s->capture.format;
    struct v4l2_format out_fmt = s->output.format;
    struct v4l2_event evt = { 0 };
    int full_reinit, reinit, ret;

    ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
        return 0;
    }

    if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
        return 0;

    ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
        return 0;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
        return 0;
    }

    full_reinit = v4l2_resolution_changed(&s->output, &out_fmt);
    if (full_reinit) {
        s->output.height = v4l2_get_height(&out_fmt);
        s->output.width = v4l2_get_width(&out_fmt);
    }

    reinit = v4l2_resolution_changed(&s->capture, &cap_fmt);
    if (reinit) {
        s->capture.height = v4l2_get_height(&cap_fmt);
        s->capture.width = v4l2_get_width(&cap_fmt);
    }

    if (full_reinit || reinit)
        s->reinit = 1;

    if (full_reinit) {
        ret = ff_v4l2_m2m_codec_full_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n");
            return -EINVAL;
        }
        goto reinit_run;
    }

    if (reinit) {
        ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
        if (ret < 0)
            av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n");

        ret = ff_v4l2_m2m_codec_reinit(s);
        if (ret) {
            av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n");
            return -EINVAL;
        }
        goto reinit_run;
    }

    /* dummy event received */
    return 0;

    /* reinit executed */
reinit_run:
    return 1;
}

static int v4l2_stop_decode(V4L2Context *ctx)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd = V4L2_DEC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
    if (ret) {
        /* DECODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
    }

    return 0;
}

static int v4l2_stop_encode(V4L2Context *ctx)
{
    struct v4l2_encoder_cmd cmd = {
        .cmd = V4L2_ENC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENCODER_CMD, &cmd);
    if (ret) {
        /* ENCODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
    }

    return 0;
}

static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer* avbuf = NULL;
    struct pollfd pfd = {
        .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        ctx->done = 1;
        return NULL;
    }

start:
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events = POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events = POLLIN | POLLRDNORM | POLLPRI;
    }

    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet,
         * there is no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {

        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete, we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                        ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }
        return avbuf;
    }

    return NULL;
}

static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
{
    int timeout = 0; /* return when no more buffers to dequeue */
    int i;

    /* get back as many output buffers as possible */
    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
        do {
        } while (v4l2_dequeue_v4l2buf(ctx, timeout));
    }

    for (i = 0; i < ctx->num_buffers; i++) {
        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
            return &ctx->buffers[i];
    }

    return NULL;
}

static int v4l2_release_buffers(V4L2Context* ctx)
{
    struct v4l2_requestbuffers req = {
        .memory = V4L2_MEMORY_MMAP,
        .type = ctx->type,
        .count = 0, /* 0 -> unmaps buffers from the driver */
    };
    int i, j;

    for (i = 0; i < ctx->num_buffers; i++) {
        V4L2Buffer *buffer = &ctx->buffers[i];

        for (j = 0; j < buffer->num_planes; j++) {
            struct V4L2Plane_info *p = &buffer->plane_info[j];
            if (p->mm_addr && p->length)
                if (munmap(p->mm_addr, p->length) < 0)
                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s)\n", ctx->name, av_err2str(AVERROR(errno)));
        }
    }

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
}

static int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
{
    struct v4l2_format *fmt = &ctx->format;
    uint32_t v4l2_fmt;
    int ret;

    v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
        fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
    else
        fmt->fmt.pix.pixelformat = v4l2_fmt;

    fmt->type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, fmt);
    if (ret)
        return AVERROR(EINVAL);

    return 0;
}

static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p)
{
    enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
    struct v4l2_fmtdesc fdesc;
    int ret;

    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    if (pixfmt != AV_PIX_FMT_NONE) {
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (!ret)
            return 0;
    }

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (ret) {
            fdesc.index++;
            continue;
        }

        *p = pixfmt;

        return 0;
    }

    return AVERROR(EINVAL);
}

static int v4l2_get_coded_format(V4L2Context* ctx, uint32_t *p)
{
    struct v4l2_fmtdesc fdesc;
    uint32_t v4l2_fmt;
    int ret;

    /* translate to a valid v4l2 format */
    v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    /* check if the driver supports this format */
    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        if (fdesc.pixelformat == v4l2_fmt)
            break;

        fdesc.index++;
    }

    *p = v4l2_fmt;

    return 0;
}

/*****************************************************************************
 *
 *                  V4L2 Context Interface
 *
 *****************************************************************************/

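/*
 * Rough usage sketch (illustrative only, not a fixed contract): the v4l2_m2m
 * wrappers are expected to drive this interface roughly as follows, with all
 * error handling omitted here:
 *
 *     ff_v4l2_context_set_format(ctx);                   // commit the negotiated format
 *     ff_v4l2_context_init(ctx);                         // request and mmap the buffers
 *     ff_v4l2_context_set_status(ctx, VIDIOC_STREAMON);  // start streaming
 *     ff_v4l2_context_enqueue_packet(&s->output, pkt);   // feed coded data (decode)
 *     ff_v4l2_context_dequeue_frame(&s->capture, frame); // collect raw frames (decode)
 *     ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF); // stop streaming
 *     ff_v4l2_context_release(ctx);                      // unmap and free the buffers
 */
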
int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd)
{
    int type = ctx->type;
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type);
    if (ret < 0)
        return AVERROR(errno);

    ctx->streamon = (cmd == VIDIOC_STREAMON);

    return 0;
}

int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!frame) {
        ret = v4l2_stop_encode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(ENOMEM);

    ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!pkt->size) {
        ret = v4l2_stop_decode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(ENOMEM);

    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame)
{
    V4L2Buffer* avbuf = NULL;

    /*
     * blocks until:
     *  1. a decoded frame is available
     *  2. an input buffer is ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
}

int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
{
    V4L2Buffer* avbuf = NULL;

    /*
     * blocks until:
     *  1. an encoded packet is available
     *  2. an input buffer is ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
}

int ff_v4l2_context_get_format(V4L2Context* ctx)
{
    struct v4l2_format_update fmt = { 0 };
    int ret;

    if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
        ret = v4l2_get_raw_format(ctx, &fmt.av_fmt);
        if (ret)
            return ret;

        fmt.update_avfmt = 1;
        v4l2_save_to_context(ctx, &fmt);

        /* format has been tried already */
        return ret;
    }

    ret = v4l2_get_coded_format(ctx, &fmt.v4l2_fmt);
    if (ret)
        return ret;

    fmt.update_v4l2 = 1;
    v4l2_save_to_context(ctx, &fmt);

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, &ctx->format);
}

int ff_v4l2_context_set_format(V4L2Context* ctx)
{
    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
}

void ff_v4l2_context_release(V4L2Context* ctx)
{
    int ret;

    if (!ctx->buffers)
        return;

    ret = v4l2_release_buffers(ctx);
    if (ret)
        av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);

    av_free(ctx->buffers);
    ctx->buffers = NULL;
}

int ff_v4l2_context_init(V4L2Context* ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_requestbuffers req;
    int ret, i;

    if (!v4l2_type_supported(ctx)) {
        av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
        return AVERROR_PATCHWELCOME;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
    if (ret)
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);

    memset(&req, 0, sizeof(req));
    req.count = ctx->num_buffers;
    req.memory = V4L2_MEMORY_MMAP;
    req.type = ctx->type;
    ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (ret < 0)
        return AVERROR(errno);

    ctx->num_buffers = req.count;
    ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
    if (!ctx->buffers) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        ctx->buffers[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer initialization (%s)\n", ctx->name, av_err2str(ret));
            av_free(ctx->buffers);
            return ret;
        }
    }

    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
        req.count,
        v4l2_get_width(&ctx->format),
        v4l2_get_height(&ctx->format),
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);

    return 0;
}
Type of this buffer context.
Definition: v4l2_context.h:47