FFmpeg
encode.c
1 /*
2  * generic encoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/attributes.h"
22 #include "libavutil/avassert.h"
23 #include "libavutil/frame.h"
24 #include "libavutil/imgutils.h"
25 #include "libavutil/internal.h"
26 #include "libavutil/samplefmt.h"
27 
28 #include "avcodec.h"
29 #include "encode.h"
30 #include "frame_thread_encoder.h"
31 #include "internal.h"
32 
33 int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
34 {
35  if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
36  av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
37  size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
38  return AVERROR(EINVAL);
39  }
40 
41  av_assert0(!avpkt->data);
42 
43  if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
44  av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
45  avpkt->data = avctx->internal->byte_buffer;
46  avpkt->size = size;
47  }
48 
49  if (!avpkt->data) {
50  int ret = av_new_packet(avpkt, size);
51  if (ret < 0)
52  av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
53  return ret;
54  }
55 
56  return 0;
57 }
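
/* Illustrative sketch (not part of encode.c): a minimal encode2() callback
 * showing how an encoder would typically reserve its output with
 * ff_alloc_packet2(). The worst-case size and the bitstream-writing step are
 * hypothetical placeholders, not taken from any real encoder. */
static int example_encode2(AVCodecContext *avctx, AVPacket *avpkt,
                           const AVFrame *frame, int *got_packet)
{
    int64_t worst_case_size = 1024;  /* hypothetical upper bound for one frame */
    int bytes_written       = 0;     /* a real encoder counts what it emits */
    int ret = ff_alloc_packet2(avctx, avpkt, worst_case_size, worst_case_size);
    if (ret < 0)
        return ret;

    /* ... write at most worst_case_size bytes into avpkt->data here ... */

    avpkt->size = bytes_written;     /* shrink to the bytes actually used */
    *got_packet = bytes_written > 0;
    return 0;
}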
58 
59 int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
60 {
61  int ret;
62 
63  if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
64  return AVERROR(EINVAL);
65 
66  if (avpkt->data || avpkt->buf) {
67  av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
68  return AVERROR(EINVAL);
69  }
70 
71  ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
72  if (ret < 0) {
73  av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
74  return ret;
75  }
76  avpkt->data = avpkt->buf->data;
77 
78  return 0;
79 }
80 
81 int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
82 {
83  int ret;
84 
85  if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
86  return AVERROR(EINVAL);
87 
88  av_assert0(!avpkt->data && !avpkt->buf);
89 
90  avpkt->size = size;
91  ret = avctx->get_encode_buffer(avctx, avpkt, flags);
92  if (ret < 0)
93  goto fail;
94 
95  if (!avpkt->data || !avpkt->buf) {
96  av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
97  ret = AVERROR(EINVAL);
98  goto fail;
99  }
100  memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
101 
102  ret = 0;
103 fail:
104  if (ret < 0) {
105  av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
106  av_packet_unref(avpkt);
107  }
108 
109  return ret;
110 }
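
/* Illustrative sketch (not part of encode.c): an encoder that implements the
 * receive_packet callback would typically size its output and then obtain the
 * buffer via ff_get_encode_buffer(); out_size and the zero fill below are
 * hypothetical stand-ins for real bitstream output. */
static int example_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int64_t out_size = 4096;              /* hypothetical packet payload size */
    int ret = ff_get_encode_buffer(avctx, avpkt, out_size, 0);
    if (ret < 0)
        return ret;

    memset(avpkt->data, 0, avpkt->size);  /* placeholder: a real encoder writes its bitstream here */
    return 0;
}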
111 
112 /**
113  * Pad last frame with silence.
114  */
115 static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
116 {
117  int ret;
118 
119  frame->format = src->format;
120  frame->channel_layout = src->channel_layout;
121  frame->channels = src->channels;
122  frame->nb_samples = s->frame_size;
123  ret = av_frame_get_buffer(frame, 0);
124  if (ret < 0)
125  goto fail;
126 
127  ret = av_frame_copy_props(frame, src);
128  if (ret < 0)
129  goto fail;
130 
131  if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
132  src->nb_samples, s->channels, s->sample_fmt)) < 0)
133  goto fail;
134  if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
135  frame->nb_samples - src->nb_samples,
136  s->channels, s->sample_fmt)) < 0)
137  goto fail;
138 
139  return 0;
140 
141 fail:
142  av_frame_unref(frame);
143  return ret;
144 }
145 
146 int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
147  const AVSubtitle *sub)
148 {
149  int ret;
150  if (sub->start_display_time) {
151  av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
152  return -1;
153  }
154 
155  ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
156  avctx->frame_number++;
157  return ret;
158 }
159 
160 int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
161 {
162  AVCodecInternal *avci = avctx->internal;
163 
164  if (avci->draining)
165  return AVERROR_EOF;
166 
167  if (!avci->buffer_frame->buf[0])
168  return AVERROR(EAGAIN);
169 
169 
170  av_frame_move_ref(frame, avci->buffer_frame);
171 
172  return 0;
173 }
174 
175 static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
176 {
177  AVCodecInternal *avci = avctx->internal;
178  EncodeSimpleContext *es = &avci->es;
179  AVFrame *frame = es->in_frame;
180  int got_packet;
181  int ret;
182 
183  if (avci->draining_done)
184  return AVERROR_EOF;
185 
186  if (!frame->buf[0] && !avci->draining) {
187  av_frame_unref(frame);
188  ret = ff_encode_get_frame(avctx, frame);
189  if (ret < 0 && ret != AVERROR_EOF)
190  return ret;
191  }
192 
193  if (!frame->buf[0]) {
194  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
195  (avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
196  return AVERROR_EOF;
197 
198  // Flushing is signaled with a NULL frame
199  frame = NULL;
200  }
201 
202  got_packet = 0;
203 
204  av_assert0(avctx->codec->encode2);
205 
206  if (CONFIG_FRAME_THREAD_ENCODER &&
207  avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
208  /* This might modify frame, but it doesn't matter, because
209  * the frame properties used below are not used for video
210  * (due to the delay inherent in frame threaded encoding, it makes
211  * no sense to use the properties of the current frame anyway). */
212  ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
213  else {
214  ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
215  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
216  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
217  avpkt->pts = avpkt->dts = frame->pts;
218  }
219 
220  av_assert0(ret <= 0);
221 
222  emms_c();
223 
224  if (!ret && got_packet) {
225  if (avpkt->data) {
226  ret = av_packet_make_refcounted(avpkt);
227  if (ret < 0)
228  goto end;
229  }
230 
231  if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
232  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
233  if (avpkt->pts == AV_NOPTS_VALUE)
234  avpkt->pts = frame->pts;
235  if (!avpkt->duration)
236  avpkt->duration = ff_samples_to_time_base(avctx,
237  frame->nb_samples);
238  }
239  }
240  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
241  /* NOTE: if we add any audio encoders which output non-keyframe packets,
242  * this needs to be moved to the encoders, but for now we can do it
243  * here to simplify things */
244  avpkt->flags |= AV_PKT_FLAG_KEY;
245  avpkt->dts = avpkt->pts;
246  }
247  }
248 
249  if (avci->draining && !got_packet)
250  avci->draining_done = 1;
251 
252 end:
253  if (ret < 0 || !got_packet)
254  av_packet_unref(avpkt);
255 
256  if (frame) {
257  if (!ret)
258  avctx->frame_number++;
259  av_frame_unref(frame);
260  }
261 
262  if (got_packet)
263  // Encoders must always return ref-counted buffers.
264  // Side-data only packets have no data and can be not ref-counted.
265  av_assert0(!avpkt->data || avpkt->buf);
266 
267  return ret;
268 }
269 
270 static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
271 {
272  int ret;
273 
274  while (!avpkt->data && !avpkt->side_data) {
275  ret = encode_simple_internal(avctx, avpkt);
276  if (ret < 0)
277  return ret;
278  }
279 
280  return 0;
281 }
282 
283 static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
284 {
285  AVCodecInternal *avci = avctx->internal;
286  int ret;
287 
288  if (avci->draining_done)
289  return AVERROR_EOF;
290 
291  av_assert0(!avpkt->data && !avpkt->side_data);
292 
293  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
294  if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
295  avctx->stats_out[0] = '\0';
296  if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
297  return AVERROR(EINVAL);
298  }
299 
300  if (avctx->codec->receive_packet) {
301  ret = avctx->codec->receive_packet(avctx, avpkt);
302  if (ret < 0)
303  av_packet_unref(avpkt);
304  else
305  // Encoders must always return ref-counted buffers.
306  // Side-data only packets have no data and can be not ref-counted.
307  av_assert0(!avpkt->data || avpkt->buf);
308  } else
309  ret = encode_simple_receive_packet(avctx, avpkt);
310 
311  if (ret == AVERROR_EOF)
312  avci->draining_done = 1;
313 
314  return ret;
315 }
316 
317 static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
318 {
319  AVCodecInternal *avci = avctx->internal;
320  AVFrame *dst = avci->buffer_frame;
321  int ret;
322 
323  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
324  /* extract audio service type metadata */
325  AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
326  if (sd && sd->size >= sizeof(enum AVAudioServiceType))
327  avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
328 
329  /* check for valid frame size */
330  if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
331  if (src->nb_samples > avctx->frame_size) {
332  av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
333  return AVERROR(EINVAL);
334  }
335  } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
336  /* if we already got an undersized frame, that must have been the last */
337  if (avctx->internal->last_audio_frame) {
338  av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
339  return AVERROR(EINVAL);
340  }
341 
342  if (src->nb_samples < avctx->frame_size) {
343  ret = pad_last_frame(avctx, dst, src);
344  if (ret < 0)
345  return ret;
346 
347  avctx->internal->last_audio_frame = 1;
348  } else if (src->nb_samples > avctx->frame_size) {
349  av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
350  return AVERROR(EINVAL);
351  }
352  }
353  }
354 
355  if (!dst->data[0]) {
356  ret = av_frame_ref(dst, src);
357  if (ret < 0)
358  return ret;
359  }
360 
361  return 0;
362 }
363 
364 int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
365 {
366  AVCodecInternal *avci = avctx->internal;
367  int ret;
368 
369  if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
370  return AVERROR(EINVAL);
371 
372  if (avci->draining)
373  return AVERROR_EOF;
374 
375  if (avci->buffer_frame->data[0])
376  return AVERROR(EAGAIN);
377 
378  if (!frame) {
379  avci->draining = 1;
380  } else {
381  ret = encode_send_frame_internal(avctx, frame);
382  if (ret < 0)
383  return ret;
384  }
385 
386  if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
387  ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
388  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
389  return ret;
390  }
391 
392  return 0;
393 }
394 
395 int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
396 {
397  AVCodecInternal *avci = avctx->internal;
398  int ret;
399 
400  av_packet_unref(avpkt);
401 
402  if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
403  return AVERROR(EINVAL);
404 
405  if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
406  av_packet_move_ref(avpkt, avci->buffer_pkt);
407  } else {
408  ret = encode_receive_packet_internal(avctx, avpkt);
409  if (ret < 0)
410  return ret;
411  }
412 
413  return 0;
414 }
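
/* Illustrative sketch (not part of encode.c): the usual application-side loop
 * built on avcodec_send_frame()/avcodec_receive_packet(). It assumes an opened
 * encoder context; consume_packet() is a hypothetical consumer of the encoded
 * data and is left commented out. Passing frame == NULL starts draining. */
static int example_encode_and_drain(AVCodecContext *enc, const AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;               /* needs more input, or fully drained */
        if (ret < 0)
            return ret;             /* genuine encoding error */

        /* consume_packet(pkt); */  /* hypothetical: write/mux the packet */
        av_packet_unref(pkt);
    }
}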
415 
416 int ff_encode_preinit(AVCodecContext *avctx)
417 {
418  int i;
419 
420  if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
421  av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
422  return AVERROR(EINVAL);
423  }
424 
425  if (avctx->codec->sample_fmts) {
426  for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
427  if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
428  break;
429  if (avctx->channels == 1 &&
430  av_get_planar_sample_fmt(avctx->sample_fmt) ==
431  av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
432  avctx->sample_fmt = avctx->codec->sample_fmts[i];
433  break;
434  }
435  }
436  if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
437  char buf[128];
438  snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
439  av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
440  (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
441  return AVERROR(EINVAL);
442  }
443  }
444  if (avctx->codec->pix_fmts) {
445  for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
446  if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
447  break;
448  if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
449  char buf[128];
450  snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
451  av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
452  (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
453  return AVERROR(EINVAL);
454  }
455  if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
456  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
457  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
458  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
459  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
460  avctx->color_range = AVCOL_RANGE_JPEG;
461  }
462  if (avctx->codec->supported_samplerates) {
463  for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
464  if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
465  break;
466  if (avctx->codec->supported_samplerates[i] == 0) {
467  av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
468  avctx->sample_rate);
469  return AVERROR(EINVAL);
470  }
471  }
472  if (avctx->sample_rate < 0) {
473  av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
474  avctx->sample_rate);
475  return AVERROR(EINVAL);
476  }
477  if (avctx->codec->channel_layouts) {
478  if (!avctx->channel_layout) {
479  av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
480  } else {
481  for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
482  if (avctx->channel_layout == avctx->codec->channel_layouts[i])
483  break;
484  if (avctx->codec->channel_layouts[i] == 0) {
485  char buf[512];
486  av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
487  av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
488  return AVERROR(EINVAL);
489  }
490  }
491  }
492  if (avctx->channel_layout && avctx->channels) {
493  int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
494  if (channels != avctx->channels) {
495  char buf[512];
496  av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
497  av_log(avctx, AV_LOG_ERROR,
498  "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
499  buf, channels, avctx->channels);
500  return AVERROR(EINVAL);
501  }
502  } else if (avctx->channel_layout) {
503  avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
504  }
505  if (avctx->channels < 0) {
506  av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
507  avctx->channels);
508  return AVERROR(EINVAL);
509  }
510  if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
511  const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
512  if ( avctx->bits_per_raw_sample < 0
513  || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
514  av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
515  avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
516  avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
517  }
518  if (avctx->width <= 0 || avctx->height <= 0) {
519  av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
520  return AVERROR(EINVAL);
521  }
522  }
523  if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
524  && avctx->bit_rate>0 && avctx->bit_rate<1000) {
525  av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
526  }
527 
528  if (!avctx->rc_initial_buffer_occupancy)
529  avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
530 
531  if (avctx->ticks_per_frame && avctx->time_base.num &&
532  avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
533  av_log(avctx, AV_LOG_ERROR,
534  "ticks_per_frame %d too large for the timebase %d/%d.",
535  avctx->ticks_per_frame,
536  avctx->time_base.num,
537  avctx->time_base.den);
538  return AVERROR(EINVAL);
539  }
540 
541  if (avctx->hw_frames_ctx) {
542  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
543  if (frames_ctx->format != avctx->pix_fmt) {
544  av_log(avctx, AV_LOG_ERROR,
545  "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
546  return AVERROR(EINVAL);
547  }
548  if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
549  avctx->sw_pix_fmt != frames_ctx->sw_format) {
550  av_log(avctx, AV_LOG_ERROR,
551  "Mismatching AVCodecContext.sw_pix_fmt (%s) "
552  "and AVHWFramesContext.sw_format (%s)\n",
553  av_get_pix_fmt_name(avctx->sw_pix_fmt),
554  av_get_pix_fmt_name(frames_ctx->sw_format));
555  return AVERROR(EINVAL);
556  }
557  avctx->sw_pix_fmt = frames_ctx->sw_format;
558  }
559 
560  return 0;
561 }