#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>

#define USEC_PER_SEC 1000000

static int64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    /* convert the driver timestamp (struct timeval) back to the codec timebase */
    v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
                        avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}
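The timestamp round-trip above and its counterpart v4l2_set_pts() are plain rational rescales: V4L2 stores timestamps as a struct timeval, so the driver-side timebase is 1/USEC_PER_SEC. A minimal standalone sketch of the same arithmetic, assuming an illustrative 1/25 codec timebase (the concrete values are invented for the example, not taken from the source):

#include <libavutil/mathematics.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    AVRational codec_tb = { 1, 25 };      /* assumed codec timebase */
    AVRational v4l2_tb  = { 1, 1000000 }; /* V4L2 timestamps count microseconds */
    int64_t pts = 100;                    /* 100 ticks at 1/25 s = 4 s */

    /* codec timebase -> microseconds, as v4l2_set_pts() does */
    int64_t usec = av_rescale_q(pts, codec_tb, v4l2_tb);  /* 4000000 */
    /* and back, as v4l2_get_pts() recovers it */
    int64_t back = av_rescale_q(usec, v4l2_tb, codec_tb); /* 100 */

    printf("%"PRId64" usec -> %"PRId64" ticks\n", usec, back);
    return 0;
}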
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M:     return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M:     return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:        return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}
static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE:  return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}
static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB:          return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709:        return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M:  return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M:     return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M:     return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL; /* constant luminance */
        else
            return AVCOL_SPC_BT2020_NCL; /* non-constant luminance */
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}
static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709:  return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M:  return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M:     return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M:     return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t *data,
                              int size, int offset)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    length = out->plane_info[plane].length;
    bytesused = FFMIN(size + offset, length);

    memcpy((uint8_t *)out->plane_info[plane].mm_addr + offset, data,
           FFMIN(size, length - offset));

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}
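The two FFMIN() clamps above keep a partial write inside the mapped plane. A quick worked check with invented numbers: for size = 100, offset = 50 and a mapped length of 120, bytesused becomes FFMIN(150, 120) = 120, while the memcpy() only moves FFMIN(100, 120 - 50) = 70 bytes, so the copy can never run past mm_addr + length even when the caller's payload would overflow the plane.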
static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;
    struct v4l2_format fmt = out->context->format;
    int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
    int height       = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
    int is_planar_format = 0;

    switch (pixel_format) {
    case V4L2_PIX_FMT_YUV420M:
    case V4L2_PIX_FMT_YVU420M:
#ifdef V4L2_PIX_FMT_YUV422M
    case V4L2_PIX_FMT_YUV422M:
#endif
#ifdef V4L2_PIX_FMT_YVU422M
    case V4L2_PIX_FMT_YVU422M:
#endif
#ifdef V4L2_PIX_FMT_YUV444M
    case V4L2_PIX_FMT_YUV444M:
#endif
#ifdef V4L2_PIX_FMT_YVU444M
    case V4L2_PIX_FMT_YVU444M:
#endif
    case V4L2_PIX_FMT_NV12M:
    case V4L2_PIX_FMT_NV21M:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV12MT:
    case V4L2_PIX_FMT_NV16M:
    case V4L2_PIX_FMT_NV61M:
        is_planar_format = 1;
    }

    if (!is_planar_format) {
        /* single V4L2 buffer: pack all AVFrame planes into plane 0,
         * one after the other, at computed offsets */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        int planes_nb = 0;
        int offset = 0;

        for (i = 0; i < desc->nb_components; i++)
            planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

        for (i = 0; i < planes_nb; i++) {
            int size, h = height;
            if (i == 1 || i == 2) {
                /* chroma planes are vertically subsampled */
                h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
            }
            size = frame->linesize[i] * h;
            ret = v4l2_bufref_to_buf(out, 0, frame->data[i], size, offset);
            if (ret)
                return ret;
            offset += size;
        }
        return 0;
    }

    /* planar (multi-buffer) format: one copy per plane */
    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data,
                                 frame->buf[i]->size, 0);
        if (ret)
            return ret;
    }

    return 0;
}
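As a side note on the non-planar branch above: the i == 1 || i == 2 test right-shifts only the chroma heights. A small self-contained sketch of that height computation for a 1080-line AV_PIX_FMT_YUV420P frame (the resolution is an invented example; log2_chroma_h is 1 for 4:2:0, so chroma planes are half height, rounded up):

#include <libavutil/pixdesc.h>
#include <libavutil/common.h>
#include <stdio.h>

int main(void)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
    int height = 1080, planes_nb = 0, i;

    for (i = 0; i < desc->nb_components; i++)
        planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

    for (i = 0; i < planes_nb; i++) {
        int h = (i == 1 || i == 2) ?
                AV_CEIL_RSHIFT(height, desc->log2_chroma_h) : height;
        printf("plane %d: height %d\n", i, h); /* 1080, 540, 540 */
    }
    return 0;
}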
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    int ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
    if (ret)
        return ret;

    /* 2. get frame information */
    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);
    frame->sample_aspect_ratio = avbuf->context->sample_aspect_ratio;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n",
               avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);
    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ?
                avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver encode error\n",
               avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes */
        for (i = 0; i < avbuf->buf.length; i++) {
            if (avbuf->buf.m.planes[i].length)
                avbuf->num_planes++;
        }
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {
        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd,
                                                avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd,
                                                avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length   = avbuf->num_planes;
    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length    = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}
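For orientation, here is a hypothetical caller sketch (not from this file) showing where ff_v4l2_buffer_initialize() sits in the usual V4L2 MMAP setup sequence: buffers are first requested with VIDIOC_REQBUFS, then each index is queried and mapped by the initializer above. The init_buffers() helper and its parameters are assumptions for the example:

/* Hypothetical caller, for illustration only: request `num` MMAP buffers
 * from the driver, then let ff_v4l2_buffer_initialize() QUERYBUF and
 * mmap each one. */
static int init_buffers(V4L2Context *ctx, V4L2Buffer *bufs, int num, int fd)
{
    struct v4l2_requestbuffers req = {
        .count  = num,
        .type   = ctx->type,
        .memory = V4L2_MEMORY_MMAP,
    };
    int i, ret;

    if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
        return AVERROR(errno);

    /* the driver may grant fewer buffers than requested */
    for (i = 0; i < req.count; i++) {
        bufs[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&bufs[i], i);
        if (ret)
            return ret;
    }
    return 0;
}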