FFmpeg
v4l2_buffers.c
/*
 * V4L2 buffer helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"

#define USEC_PER_SEC 1000000

static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
        container_of(buf->context, V4L2m2mContext, output) :
        container_of(buf->context, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Buffer *buf)
{
    return buf_to_m2mctx(buf)->avctx;
}

static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
    V4L2m2mContext *s = buf_to_m2mctx(out);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    if (pts == AV_NOPTS_VALUE)
        pts = 0;

    /* convert pts to v4l2 timebase */
    v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
    out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
}

static inline uint64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    /* convert pts back to encoder timebase */
    v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
               avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}
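
/*
 * Worked example of the conversion above (illustrative values, not taken from
 * any particular driver): with avctx->time_base = 1/25 and pts = 50,
 * av_rescale_q(50, (AVRational){1, 25}, (AVRational){1, USEC_PER_SEC})
 * yields 2000000 microseconds, stored as tv_sec = 2, tv_usec = 0;
 * v4l2_get_pts() rescales those 2000000 microseconds back to 50 in the
 * 1/25 timebase.
 */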

static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}

static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}

static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL;
        else
            return AVCOL_SPC_BT2020_NCL;
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}

static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}
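
/*
 * For illustration (hypothetical driver values, not from the original source):
 * a capture queue reporting V4L2_COLORSPACE_REC709, V4L2_YCBCR_ENC_709,
 * V4L2_XFER_FUNC_709 and V4L2_QUANTIZATION_LIM_RANGE would be tagged by the
 * four helpers above as AVCOL_PRI_BT709, AVCOL_SPC_BT709, AVCOL_TRC_BT709
 * and AVCOL_RANGE_MPEG on the returned frame.
 */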

static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
    V4L2Buffer* avbuf = opaque;
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
        atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);

        if (s->reinit) {
            if (!atomic_load(&s->refcount))
                sem_post(&s->refsync);
        } else {
            if (s->draining) {
                /* no need to queue more buffers to the driver */
                avbuf->status = V4L2BUF_AVAILABLE;
            }
            else if (avbuf->context->streamon)
                ff_v4l2_buffer_enqueue(avbuf);
        }

        av_buffer_unref(&avbuf->context_ref);
    }
}

static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
    V4L2m2mContext *s = buf_to_m2mctx(in);

    if (plane >= in->num_planes)
        return AVERROR(EINVAL);

    /* even though most encoders return 0 in data_offset, encoding VP8 does require this value */
    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
    if (!*buf)
        return AVERROR(ENOMEM);

    if (in->context_ref)
        atomic_fetch_add(&in->context_refcount, 1);
    else {
        in->context_ref = av_buffer_ref(s->self_ref);
        if (!in->context_ref) {
            av_buffer_unref(buf);
            return AVERROR(ENOMEM);
        }
        in->context_refcount = 1;
    }

    in->status = V4L2BUF_RET_USER;
    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);

    return 0;
}
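
/*
 * Note on the reference counting above: every AVBufferRef handed out by
 * v4l2_buf_to_bufref() bumps context_refcount, and v4l2_free_buffer()
 * decrements it again on each release. Only when the count drops to zero is
 * the V4L2 buffer re-queued to the driver (if streaming) or marked
 * V4L2BUF_AVAILABLE, so frames and packets returned to the caller stay valid
 * for as long as the caller keeps a reference.
 */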

static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    bytesused = FFMIN(size, out->plane_info[plane].length);
    length = out->plane_info[plane].length;

    memcpy(out->plane_info[plane].mm_addr, data, FFMIN(size, out->plane_info[plane].length));

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}
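
/*
 * Illustrative sizes (made up for this comment): copying a 100000-byte input
 * into a plane whose mapped length is 65536 bytes writes only
 * FFMIN(100000, 65536) = 65536 bytes and reports bytesused = 65536; anything
 * beyond the mapped plane length is silently dropped.
 */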

/******************************************************************************
 *
 *              V4L2Buffer interface
 *
 ******************************************************************************/

int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;

    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
        if (ret)
            return ret;
    }

    v4l2_set_pts(out, frame->pts);

    return 0;
}

int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    int i, ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    for (i = 0; i < avbuf->num_planes; i++) {
        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
        if (ret)
            return ret;

        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
        frame->data[i] = frame->buf[i]->data;
    }

    /* 1.1 fixup special cases */
    switch (avbuf->context->av_pix_fmt) {
    case AV_PIX_FMT_NV12:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        break;
    default:
        break;
    }

    /* 2. get frame information */
    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
    frame->format = avbuf->context->av_pix_fmt;
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);

    /* these two values are also updated during re-init in v4l2_process_driver_event */
    frame->height = s->output.height;
    frame->width = s->output.width;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}
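
/*
 * Example for the NV12 fixup above, with made-up dimensions: a 640x480 NV12
 * frame delivered as a single plane with bytesperline = 640 gets its chroma
 * pointer set to data[0] + 640 * 480 = data[0] + 307200, i.e. the interleaved
 * UV plane starts right after the luma plane within the same mapping.
 */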

int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);
    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}

int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, pkt->buf);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}

int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        for (;;) {
            /* in MP, the V4L2 API states that buf.length means num_planes */
            if (avbuf->num_planes >= avbuf->buf.length)
                break;
            if (avbuf->buf.m.planes[avbuf->num_planes].length)
                avbuf->num_planes++;
        }
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {

        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length = avbuf->num_planes;

    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}
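
/*
 * In the function above, only capture-side buffers are queued to the driver
 * straight away; output-side buffers (V4L2_TYPE_IS_OUTPUT) return early and
 * are enqueued later, once ff_v4l2_buffer_avframe_to_buf() or
 * ff_v4l2_buffer_avpkt_to_buf() has filled them with data to be processed.
 */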

int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
{
    int ret;

    avbuf->buf.flags = avbuf->flags;

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    avbuf->status = V4L2BUF_IN_DRIVER;

    return 0;
}