#include <linux/videodev2.h>
#include <sys/ioctl.h>
/* Shorthand for the long V4L2 MPEG-class control IDs and enumeration values. */
#define MPEG_CID(x) V4L2_CID_MPEG_VIDEO_##x
#define MPEG_VIDEO(x) V4L2_MPEG_VIDEO_##x
    struct v4l2_streamparm parm = { 0 };

    parm.type = V4L2_TYPE_IS_MULTIPLANAR(s->output.type) ?
                V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT;
    parm.parm.output.timeperframe.denominator = den;
    parm.parm.output.timeperframe.numerator = num;

    if (ioctl(s->fd, VIDIOC_S_PARM, &parm) < 0)
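For context, the time-per-frame passed to this helper is the inverse of the configured frame rate; a hedged sketch of how encoder setup might invoke it (the exact call site is an assumption, not shown in this listing):

    /* Sketch: timeperframe = 1 / framerate, so numerator and denominator swap. */
    if (avctx->framerate.num && avctx->framerate.den)
        v4l2_set_timeperframe(s, avctx->framerate.den, avctx->framerate.num);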
    struct v4l2_ext_controls ctrls = { { 0 } };
    struct v4l2_ext_control ctrl = { 0 };

    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
    ctrls.controls = &ctrl;

    if (ioctl(s->fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
               "Failed to set %s: %s\n", name, strerror(errno));
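A hedged example of how this setter is typically used together with the MPEG_CID() shorthand defined above (the specific controls and human-readable names are illustrative):

    v4l2_set_ext_ctrl(s, MPEG_CID(BITRATE),  avctx->bit_rate, "bit rate", 1);
    v4l2_set_ext_ctrl(s, MPEG_CID(GOP_SIZE), avctx->gop_size, "gop size", 1);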
    struct v4l2_ext_controls ctrls = { { 0 } };
    struct v4l2_ext_control ctrl = { 0 };

    ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
    ctrls.controls = &ctrl;

    ret = ioctl(s->fd, VIDIOC_G_EXT_CTRLS, &ctrls);
               "Failed to get %s\n", name);
    static const struct h264_profile {
        unsigned int ffmpeg_val;
        unsigned int v4l2_val;
    static const struct mpeg4_profile {
        unsigned int ffmpeg_val;
        unsigned int v4l2_val;
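Both tables pair an AV_PROFILE_* value with its V4L2_MPEG_VIDEO_* counterpart, and the lookup helpers scan them linearly. A hedged sketch of the shape of such a table and its lookup; the entries shown and the array name are illustrative, not the file's full list, and a miss falls through to an error value:

    static const struct h264_profile {
        unsigned int ffmpeg_val;
        unsigned int v4l2_val;
    } profile[] = {
        { AV_PROFILE_H264_BASELINE, MPEG_VIDEO(H264_PROFILE_BASELINE) },
        { AV_PROFILE_H264_MAIN,     MPEG_VIDEO(H264_PROFILE_MAIN)     },
        { AV_PROFILE_H264_HIGH,     MPEG_VIDEO(H264_PROFILE_HIGH)     },
    };

    for (int i = 0; i < FF_ARRAY_ELEMS(profile); i++)
        if (profile[i].ffmpeg_val == p)
            return profile[i].v4l2_val;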
    if (s->avctx->max_b_frames)

    if (s->avctx->max_b_frames == 0)
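For context, a hedged sketch of how such a B-frame probe can be wired with the helpers above (the exact arguments are an assumption). The nearby references to avpriv_report_missing_feature() and AVERROR_PATCHWELCOME suggest that a driver which insists on producing B-frames makes initialization fail, since PTS/DTS handling for them is not implemented:

    /* Sketch: request zero B-frames, then read the control back so that
     * s->avctx->max_b_frames reflects what the driver will actually produce. */
    v4l2_set_ext_ctrl(s, MPEG_CID(B_FRAMES), 0, "number of B-frames", 0);
    v4l2_get_ext_ctrl(s, MPEG_CID(B_FRAMES), &s->avctx->max_b_frames, "number of B-frames", 0);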
    struct v4l2_event_subscription sub;

    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_EOS;
    if (ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
               "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");
    int qmin_cid, qmax_cid, qmin, qmax;
202 "Encoder Context: id (%d), profile (%d), frame rate(%d/%d), number b-frames (%d), "
203 "gop size (%d), bit rate (%"PRId64
"), qmin (%d), qmax (%d)\n",
260 "exceed qmax\n", avctx->
qmin, avctx->
qmax);
262 qmin = avctx->
qmin >= 0 ? avctx->
qmin : qmin;
263 qmax = avctx->
qmax >= 0 ? avctx->
qmax : qmax;
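After this clamping, the values are presumably applied through the codec-specific control IDs selected into qmin_cid and qmax_cid earlier (for H.264 that would be MPEG_CID(H264_MIN_QP) and MPEG_CID(H264_MAX_QP)); a hedged sketch of that step:

    v4l2_set_ext_ctrl(s, qmin_cid, qmin, "minimum video quantizer scale", 0);
    v4l2_set_ext_ctrl(s, qmax_cid, qmax, "maximum video quantizer scale", 0);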
#ifdef V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME
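    /* Hedged sketch of what this conditional region typically guards: when the
     * driver exposes V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, an explicit intra-frame
     * request from the caller is forwarded as a control before the frame is
     * enqueued. The exact body is an assumption; it is not shown in this listing. */
    if (frame && frame->pict_type == AV_PICTURE_TYPE_I)
        v4l2_set_ext_ctrl(s, MPEG_CID(FORCE_KEY_FRAME), 0, "force key frame", 1);
#endif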
    if (!frame->buf[0]) {
    uint32_t v4l2_fmt_output;

    capture = &s->capture;

    if (V4L2_TYPE_IS_MULTIPLANAR(output->type))
        v4l2_fmt_output = output->format.fmt.pix_mp.pixelformat;
    else
        v4l2_fmt_output = output->format.fmt.pix.pixelformat;

    if (pix_fmt_output != avctx->pix_fmt) {
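The pix_fmt_output value used in that comparison is presumably derived by mapping the negotiated V4L2 pixelformat back to an AVPixelFormat; a hedged sketch of that step and the error path, using helpers that appear in the reference list below (the exact log message is illustrative):

    pix_fmt_output = ff_v4l2_format_v4l2_to_avfmt(v4l2_fmt_output, AV_CODEC_ID_RAWVIDEO);
    if (pix_fmt_output != avctx->pix_fmt) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt_output);
        av_log(avctx, AV_LOG_ERROR, "Encoder cannot support pixel format %s\n",
               desc ? desc->name : "(unknown)");
        return AVERROR(EINVAL);
    }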
#define OFFSET(x) offsetof(V4L2m2mPriv, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

#define V4L_M2M_CAPTURE_OPTS \
    V4L_M2M_DEFAULT_OPTS,\
    { "num_capture_buffers", "Number of buffers in the capture context", \
        OFFSET(num_capture_buffers), AV_OPT_TYPE_INT, {.i64 = 4 }, 4, INT_MAX, FLAGS }
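A hedged sketch of how the per-encoder AVOption tables referenced in the list below (options[] and mpeg4_options[]) can be assembled from this macro; the exact contents are an assumption:

static const AVOption options[] = {
    V4L_M2M_CAPTURE_OPTS,
    { NULL },
};

static const AVOption mpeg4_options[] = {
    V4L_M2M_CAPTURE_OPTS,
    FF_MPEG4_PROFILE_OPTS
    { NULL },
};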
#define M2MENC_CLASS(NAME, OPTIONS_NAME) \
    static const AVClass v4l2_m2m_ ## NAME ## _enc_class = { \
        .class_name = #NAME "_v4l2m2m_encoder", \
        .item_name  = av_default_item_name, \
        .option     = OPTIONS_NAME, \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define M2MENC(NAME, LONGNAME, OPTIONS_NAME, CODEC) \
    M2MENC_CLASS(NAME, OPTIONS_NAME) \
    const FFCodec ff_ ## NAME ## _v4l2m2m_encoder = { \
        .p.name         = #NAME "_v4l2m2m" , \
        CODEC_LONG_NAME("V4L2 mem2mem " LONGNAME " encoder wrapper"), \
        .p.type         = AVMEDIA_TYPE_VIDEO, \
        .priv_data_size = sizeof(V4L2m2mPriv), \
        .p.priv_class   = &v4l2_m2m_ ## NAME ##_enc_class, \
        .init           = v4l2_encode_init, \
        FF_CODEC_RECEIVE_PACKET_CB(v4l2_receive_packet), \
        .close          = v4l2_encode_close, \
        .defaults       = v4l2_m2m_defaults, \
        .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY, \
        .color_ranges   = AVCOL_RANGE_MPEG, \
        .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE | \
                          FF_CODEC_CAP_INIT_CLEANUP, \
        .p.wrapper_name = "v4l2m2m", \
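The macro is then expanded once per supported codec; a hedged sketch of such instantiations (the exact set of codecs wired up here is an assumption):

M2MENC(mpeg4, "MPEG4", mpeg4_options, AV_CODEC_ID_MPEG4);
M2MENC(h264,  "H.264", options,       AV_CODEC_ID_H264);
M2MENC(hevc,  "HEVC",  options,       AV_CODEC_ID_HEVC);
M2MENC(vp8,   "VP8",   options,       AV_CODEC_ID_VP8);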
static unsigned int v4l2_h264_profile_from_ff(int p)
#define AV_LOG_WARNING
Something somehow does not look correct.
enum AVPixelFormat av_pix_fmt
AVPixelFormat corresponding to this buffer context.
AVPixelFormat
Pixel format.
name
is the option name
static const FFCodecDefault v4l2_m2m_defaults[]
#define AV_PROFILE_H264_HIGH_10_INTRA
#define AV_PROFILE_MPEG4_ADVANCED_SIMPLE
#define AV_PROFILE_MPEG4_SIMPLE_SCALABLE
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
#define AV_PROFILE_H264_MAIN
static av_cold int v4l2_encode_close(AVCodecContext *avctx)
static av_cold int v4l2_encode_init(AVCodecContext *avctx)
enum AVCodecID av_codec_id
AVCodecID corresponding to this buffer context.
static void v4l2_subscribe_eos_event(V4L2m2mContext *s)
AVFrame
This structure describes decoded (raw) audio or video data.
int ff_v4l2_context_dequeue_packet(V4L2Context *ctx, AVPacket *pkt)
Dequeues a buffer from a V4L2Context to an AVPacket.
static int v4l2_send_frame(AVCodecContext *avctx, const AVFrame *frame)
static int v4l2_get_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int *value, const char *name, int log_warning)
int qmax
maximum quantizer
#define AV_PROFILE_H264_EXTENDED
#define M2MENC(NAME, LONGNAME, OPTIONS_NAME, CODEC)
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
int streamon
Whether the stream has been started (VIDIOC_STREAMON has been sent).
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define AV_PROFILE_UNKNOWN
enum AVPixelFormat ff_v4l2_format_v4l2_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
static void v4l2_set_timeperframe(V4L2m2mContext *s, unsigned int num, unsigned int den)
#define AV_PROFILE_H264_HIGH_10
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PROFILE_H264_HIGH_422_INTRA
int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
Probes the video nodes looking for the required codec capabilities.
#define AV_PROFILE_H264_HIGH_422
static int v4l2_mpeg4_profile_from_ff(int p)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static const AVOption options[]
int64_t bit_rate
the average bitrate
@ AV_PICTURE_TYPE_I
Intra.
int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
Allocate a new context and references for a V4L2 M2M instance.
static const AVOption mpeg4_options[]
static int v4l2_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv)
Releases all the codec resources if all AVBufferRefs have been returned to the ctx.
int width
Width and height of the frames it produces (in case of a capture context, e.g.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
#define AV_PROFILE_MPEG4_ADVANCED_CODING
int ff_v4l2_context_set_status(V4L2Context *ctx, uint32_t cmd)
Sets the status of a V4L2Context.
#define FF_MPEG4_PROFILE_OPTS
#define AV_PROFILE_MPEG4_SIMPLE
#define i(width, name, range_min, range_max)
default
default value
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int ff_v4l2_context_enqueue_frame(V4L2Context *ctx, const AVFrame *frame)
Enqueues a buffer to a V4L2Context from an AVFrame.
static int v4l2_prepare_encoder(V4L2m2mContext *s)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_PROFILE_H264_HIGH_444_PREDICTIVE
#define AV_PROFILE_H264_BASELINE
#define AV_PROFILE_MPEG4_CORE
AVCodecContext
main external API structure.
#define AV_PROFILE_H264_HIGH
int qmin
minimum quantizer
static void v4l2_set_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int value, const char *name, int log_warning)
#define AV_PROFILE_H264_CONSTRAINED_BASELINE
#define AV_PROFILE_H264_HIGH_444_INTRA
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
AVPacket
This structure stores compressed data.
int width
picture width / height.
#define V4L_M2M_CAPTURE_OPTS
static int v4l2_check_b_frame_support(V4L2m2mContext *s)
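Taken together, the helpers listed above outline the steady-state encode flow: pull a raw frame with ff_encode_get_frame(), enqueue it on the V4L2 output side, and dequeue a compressed packet from the capture side. A hedged, simplified sketch of that flow follows; the function name is hypothetical, and draining, stream start (VIDIOC_STREAMON) and most error handling are omitted:

static int receive_packet_sketch(AVCodecContext *avctx, AVPacket *avpkt)
{
    V4L2m2mContext *s = ((V4L2m2mPriv *) avctx->priv_data)->context;
    AVFrame *frame = s->frame;
    int ret;

    /* Pull the next raw frame from the encoder's input queue. */
    ret = ff_encode_get_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR_EOF)
        return ret;

    /* Hand it to the V4L2 output context; a NULL frame would start draining. */
    ret = v4l2_send_frame(avctx, ret == AVERROR_EOF ? NULL : frame);
    av_frame_unref(frame);
    if (ret < 0)
        return ret;

    /* Collect an encoded packet from the capture context. */
    return ff_v4l2_context_dequeue_packet(&s->capture, avpkt);
}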