FFmpeg
v4l2_m2m_dec.c
/*
 * V4L2 mem2mem decoders
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include "libavutil/pixfmt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/decode.h"
#include "libavcodec/internal.h"

#include "v4l2_context.h"
#include "v4l2_m2m.h"
#include "v4l2_fmt.h"

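/*
 * Start the mem2mem pipeline once the first packet has been queued:
 * stream on the output (bitstream) side, query the capture format
 * negotiated by the driver, propagate it to the AVCodecContext, apply
 * the crop rectangle, allocate the capture buffers and finally stream
 * on the capture side.
 */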
static int v4l2_try_start(AVCodecContext *avctx)
{
    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
    V4L2Context *const capture = &s->capture;
    V4L2Context *const output = &s->output;
    struct v4l2_selection selection = { 0 };
    int ret;

    /* 1. start the output process */
    if (!output->streamon) {
        ret = ff_v4l2_context_set_status(output, VIDIOC_STREAMON);
        if (ret < 0) {
            av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON on output context\n");
            return ret;
        }
    }

    if (capture->streamon)
        return 0;

    /* 2. get the capture format */
    capture->format.type = capture->type;
    ret = ioctl(s->fd, VIDIOC_G_FMT, &capture->format);
    if (ret) {
        av_log(avctx, AV_LOG_WARNING, "VIDIOC_G_FMT ioctl\n");
        return ret;
    }

    /* 2.1 update the AVCodecContext */
    avctx->pix_fmt = ff_v4l2_format_v4l2_to_avfmt(capture->format.fmt.pix_mp.pixelformat, AV_CODEC_ID_RAWVIDEO);
    capture->av_pix_fmt = avctx->pix_fmt;

    /* 3. set the crop parameters */
    selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    selection.r.height = avctx->coded_height;
    selection.r.width = avctx->coded_width;
    ret = ioctl(s->fd, VIDIOC_S_SELECTION, &selection);
    if (!ret) {
        ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection);
        if (ret) {
            av_log(avctx, AV_LOG_WARNING, "VIDIOC_G_SELECTION ioctl\n");
        } else {
            av_log(avctx, AV_LOG_DEBUG, "crop output %dx%d\n", selection.r.width, selection.r.height);
            /* update the size of the resulting frame */
            capture->height = selection.r.height;
            capture->width  = selection.r.width;
        }
    }

    /* 4. init the capture context now that we have the capture format */
    if (!capture->buffers) {
        ret = ff_v4l2_context_init(capture);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "can't request capture buffers\n");
            return AVERROR(ENOMEM);
        }
    }

    /* 5. start the capture process */
    ret = ff_v4l2_context_set_status(capture, VIDIOC_STREAMON);
    if (ret) {
        av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON on capture context\n");
        return ret;
    }

    return 0;
}

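/*
 * Subscribe to the V4L2 events the decoder relies on: source-change
 * (resolution/format changes signalled by the driver) and end-of-stream.
 * If source-change events are not supported, the coded dimensions must
 * have been provided by the caller.
 */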
static int v4l2_prepare_decoder(V4L2m2mContext *s)
{
    struct v4l2_event_subscription sub;
    V4L2Context *output = &s->output;
    int ret;

    /**
     * requirements
     */
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    if (ret < 0) {
        if (output->height == 0 || output->width == 0) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n"
                   "you must provide codec_height and codec_width on input\n");
            return ret;
        }
    }

    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_EOS;
    ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    if (ret < 0)
        av_log(s->avctx, AV_LOG_WARNING,
               "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");

    return 0;
}

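/*
 * receive_frame callback: fetch the next packet from the decode queue
 * (or reuse one buffered after a previous EAGAIN), enqueue it on the
 * output context, start the pipeline if it is not running yet, and
 * dequeue a decoded frame from the capture context.
 */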
static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
    V4L2Context *const capture = &s->capture;
    V4L2Context *const output = &s->output;
    AVPacket avpkt = {0};
    int ret;

    if (s->buf_pkt.size) {
        avpkt = s->buf_pkt;
        memset(&s->buf_pkt, 0, sizeof(AVPacket));
    } else {
        ret = ff_decode_get_packet(avctx, &avpkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (s->draining)
        goto dequeue;

    ret = ff_v4l2_context_enqueue_packet(output, &avpkt);
    if (ret < 0) {
        if (ret != AVERROR(EAGAIN))
            return ret;

        s->buf_pkt = avpkt;
        /* no input buffers available, continue dequeuing */
    }

    if (avpkt.size) {
        ret = v4l2_try_start(avctx);
        if (ret) {
            av_packet_unref(&avpkt);

            /* can't recover */
            if (ret == AVERROR(ENOMEM))
                return ret;

            return 0;
        }
    }

dequeue:
    if (!s->buf_pkt.size)
        av_packet_unref(&avpkt);
    return ff_v4l2_context_dequeue_frame(capture, frame, -1);
}

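/*
 * Decoder init: allocate the m2m context, seed both queues with the
 * coded dimensions and codec ids, probe the video nodes for a device
 * with the required capabilities and subscribe to the driver events.
 */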
static av_cold int v4l2_decode_init(AVCodecContext *avctx)
{
    V4L2Context *capture, *output;
    V4L2m2mContext *s;
    V4L2m2mPriv *priv = avctx->priv_data;
    int ret;

    ret = ff_v4l2_m2m_create_context(priv, &s);
    if (ret < 0)
        return ret;

    capture = &s->capture;
    output = &s->output;

    /* if these dimensions are invalid (i.e. 0 or too small) an event will be raised
     * by the v4l2 driver; this event will trigger a full pipeline reconfig and
     * the proper values will be retrieved from the kernel driver.
     */
    output->height = capture->height = avctx->coded_height;
    output->width = capture->width = avctx->coded_width;

    output->av_codec_id = avctx->codec_id;
    output->av_pix_fmt  = AV_PIX_FMT_NONE;

    capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
    capture->av_pix_fmt = avctx->pix_fmt;

    s->avctx = avctx;
    ret = ff_v4l2_m2m_codec_init(priv);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
        s->self_ref = NULL;
        av_buffer_unref(&priv->context_ref);

        return ret;
    }

    return v4l2_prepare_decoder(s);
}

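/*
 * Decoder close: drop any packet buffered by receive_frame and release
 * the codec resources once all buffer references have been returned.
 */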
static av_cold int v4l2_decode_close(AVCodecContext *avctx)
{
    V4L2m2mPriv *priv = avctx->priv_data;
    V4L2m2mContext *s = priv->context;
    av_packet_unref(&s->buf_pkt);
    return ff_v4l2_m2m_codec_end(priv);
}

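/*
 * Private decoder options. V4L_M2M_DEFAULT_OPTS (defined in v4l2_m2m.h)
 * contributes the options common to the V4L2 M2M wrappers;
 * num_capture_buffers controls how many buffers are requested on the
 * capture queue.
 */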
#define OFFSET(x) offsetof(V4L2m2mPriv, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    V4L_M2M_DEFAULT_OPTS,
    { "num_capture_buffers", "Number of buffers in the capture context",
        OFFSET(num_capture_buffers), AV_OPT_TYPE_INT, {.i64 = 20}, 20, INT_MAX, FLAGS },
    { NULL},
};

#define M2MDEC_CLASS(NAME) \
    static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
        .class_name = #NAME "_v4l2m2m_decoder", \
        .item_name  = av_default_item_name, \
        .option     = options, \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
    M2MDEC_CLASS(NAME) \
    AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
        .name           = #NAME "_v4l2m2m" , \
        .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = CODEC , \
        .priv_data_size = sizeof(V4L2m2mPriv), \
        .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class, \
        .init           = v4l2_decode_init, \
        .receive_frame  = v4l2_receive_frame, \
        .close          = v4l2_decode_close, \
        .bsfs           = bsf_name, \
        .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
        .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS, \
        .wrapper_name   = "v4l2m2m", \
    }

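/*
 * One decoder wrapper is registered per supported codec. H.264 and HEVC
 * are routed through the corresponding mp4toannexb bitstream filter so
 * the driver always receives Annex B input.
 */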
M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,       "h264_mp4toannexb");
M2MDEC(hevc,  "HEVC",  AV_CODEC_ID_HEVC,       "hevc_mp4toannexb");
M2MDEC(mpeg1, "MPEG1", AV_CODEC_ID_MPEG1VIDEO, NULL);
M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);
M2MDEC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4,      NULL);
M2MDEC(h263,  "H.263", AV_CODEC_ID_H263,       NULL);
M2MDEC(vc1 ,  "VC1",   AV_CODEC_ID_VC1,        NULL);
M2MDEC(vp8,   "VP8",   AV_CODEC_ID_VP8,        NULL);
M2MDEC(vp9,   "VP9",   AV_CODEC_ID_VP9,        NULL);
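
/*
 * Illustrative usage sketch: one way an application might select one of
 * these wrappers and tune the capture queue depth (the buffer count of 24
 * is an arbitrary example value; error handling omitted for brevity):
 *
 *     const AVCodec *codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *
 *     // "num_capture_buffers" is the private option declared above
 *     av_opt_set_int(ctx->priv_data, "num_capture_buffers", 24, 0);
 *
 *     avcodec_open2(ctx, codec, NULL);
 *     // feed packets with avcodec_send_packet() and collect frames with
 *     // avcodec_receive_frame(), as with any other decoder.
 */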