FFmpeg
encode.c
1 /*
2  * generic encoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/attributes.h"
22 #include "libavutil/avassert.h"
23 #include "libavutil/channel_layout.h"
24 #include "libavutil/frame.h"
25 #include "libavutil/imgutils.h"
26 #include "libavutil/internal.h"
27 #include "libavutil/samplefmt.h"
28 
29 #include "avcodec.h"
30 #include "encode.h"
31 #include "frame_thread_encoder.h"
32 #include "internal.h"
33 
34 int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
35 {
36  if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
37  av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
38         size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
39  return AVERROR(EINVAL);
40  }
41 
42  av_assert0(!avpkt->data);
43 
44  av_fast_padded_malloc(&avctx->internal->byte_buffer,
45                        &avctx->internal->byte_buffer_size, size);
46  avpkt->data = avctx->internal->byte_buffer;
47  if (!avpkt->data) {
48  av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
49  return AVERROR(ENOMEM);
50  }
51  avpkt->size = size;
52 
53  return 0;
54 }
55 
56 int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
57 {
58  int ret;
59 
60  if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
61  return AVERROR(EINVAL);
62 
63  if (avpkt->data || avpkt->buf) {
64  av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
65  return AVERROR(EINVAL);
66  }
67 
68  ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
69  if (ret < 0) {
70  av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
71  return ret;
72  }
73  avpkt->data = avpkt->buf->data;
74 
75  return 0;
76 }
77 
78 int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
79 {
80  int ret;
81 
82  if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
83  return AVERROR(EINVAL);
84 
85  av_assert0(!avpkt->data && !avpkt->buf);
86 
87  avpkt->size = size;
88  ret = avctx->get_encode_buffer(avctx, avpkt, flags);
89  if (ret < 0)
90  goto fail;
91 
92  if (!avpkt->data || !avpkt->buf) {
93  av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
94  ret = AVERROR(EINVAL);
95  goto fail;
96  }
97  memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
98 
99  ret = 0;
100 fail:
101  if (ret < 0) {
102  av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
103  av_packet_unref(avpkt);
104  }
105 
106  return ret;
107 }
108 
109 /**
110  * Pad last frame with silence.
111  */
111  */
112 static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
113 {
114  int ret;
115 
116  frame->format = src->format;
117  frame->channel_layout = src->channel_layout;
118  frame->channels = src->channels;
119  frame->nb_samples = s->frame_size;
120  ret = av_frame_get_buffer(frame, 0);
121  if (ret < 0)
122  goto fail;
123 
124  ret = av_frame_copy_props(frame, src);
125  if (ret < 0)
126  goto fail;
127 
128  if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
129  src->nb_samples, s->channels, s->sample_fmt)) < 0)
130  goto fail;
131  if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
132  frame->nb_samples - src->nb_samples,
133  s->channels, s->sample_fmt)) < 0)
134  goto fail;
135 
136  return 0;
137 
138 fail:
139  av_frame_unref(frame);
140  return ret;
141 }
142 
143 int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
144  const AVSubtitle *sub)
145 {
146  int ret;
147  if (sub->start_display_time) {
148  av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
149  return -1;
150  }
151 
152  ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
153  avctx->frame_number++;
154  return ret;
155 }
156 
157 int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
158 {
159  AVCodecInternal *avci = avctx->internal;
160 
161  if (avci->draining)
162  return AVERROR_EOF;
163 
164  if (!avci->buffer_frame->buf[0])
165  return AVERROR(EAGAIN);
166 
167  av_frame_move_ref(frame, avci->buffer_frame);
168 
169  return 0;
170 }
171 
172 static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
173 {
174  AVCodecInternal *avci = avctx->internal;
175  EncodeSimpleContext *es = &avci->es;
176  AVFrame *frame = es->in_frame;
177  int got_packet;
178  int ret;
179 
180  if (avci->draining_done)
181  return AVERROR_EOF;
182 
183  if (!frame->buf[0] && !avci->draining) {
184  av_frame_unref(frame);
185  ret = ff_encode_get_frame(avctx, frame);
186  if (ret < 0 && ret != AVERROR_EOF)
187  return ret;
188  }
189 
190  if (!frame->buf[0]) {
191  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
192  (avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
193  return AVERROR_EOF;
194 
195  // Flushing is signaled with a NULL frame
196  frame = NULL;
197  }
198 
199  got_packet = 0;
200 
201  av_assert0(avctx->codec->encode2);
202 
203  if (CONFIG_FRAME_THREAD_ENCODER &&
204  avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
205  /* This might modify frame, but it doesn't matter, because
206  * the frame properties used below are not used for video
207  * (due to the delay inherent in frame threaded encoding, it makes
208  * no sense to use the properties of the current frame anyway). */
209  ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
210  else {
211  ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
212  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
213  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
214  avpkt->pts = avpkt->dts = frame->pts;
215  }
216 
217  av_assert0(ret <= 0);
218 
219  emms_c();
220 
221  if (!ret && got_packet) {
222  if (avpkt->data) {
223  ret = av_packet_make_refcounted(avpkt);
224  if (ret < 0)
225  goto end;
226  }
227 
228  if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
229  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
230  if (avpkt->pts == AV_NOPTS_VALUE)
231  avpkt->pts = frame->pts;
232  if (!avpkt->duration)
233  avpkt->duration = ff_samples_to_time_base(avctx,
234  frame->nb_samples);
235  }
236  }
237  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
238  /* NOTE: if we add any audio encoders which output non-keyframe packets,
239  * this needs to be moved to the encoders, but for now we can do it
240  * here to simplify things */
241  avpkt->flags |= AV_PKT_FLAG_KEY;
242  avpkt->dts = avpkt->pts;
243  }
244  }
245 
246  if (avci->draining && !got_packet)
247  avci->draining_done = 1;
248 
249 end:
250  if (ret < 0 || !got_packet)
251  av_packet_unref(avpkt);
252 
253  if (frame) {
254  if (!ret)
255  avctx->frame_number++;
256  av_frame_unref(frame);
257  }
258 
259  if (got_packet)
260  // Encoders must always return ref-counted buffers.
261  // Side-data only packets have no data and can be not ref-counted.
262  av_assert0(!avpkt->data || avpkt->buf);
263 
264  return ret;
265 }
266 
267 static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
268 {
269  int ret;
270 
271  while (!avpkt->data && !avpkt->side_data) {
272  ret = encode_simple_internal(avctx, avpkt);
273  if (ret < 0)
274  return ret;
275  }
276 
277  return 0;
278 }
279 
280 static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
281 {
282  AVCodecInternal *avci = avctx->internal;
283  int ret;
284 
285  if (avci->draining_done)
286  return AVERROR_EOF;
287 
288  av_assert0(!avpkt->data && !avpkt->side_data);
289 
290  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
291  if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
292  avctx->stats_out[0] = '\0';
293  if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
294  return AVERROR(EINVAL);
295  }
296 
297  if (avctx->codec->receive_packet) {
298  ret = avctx->codec->receive_packet(avctx, avpkt);
299  if (ret < 0)
300  av_packet_unref(avpkt);
301  else
302  // Encoders must always return ref-counted buffers.
303  // Side-data only packets have no data and can be not ref-counted.
304  av_assert0(!avpkt->data || avpkt->buf);
305  } else
306  ret = encode_simple_receive_packet(avctx, avpkt);
307 
308  if (ret == AVERROR_EOF)
309  avci->draining_done = 1;
310 
311  return ret;
312 }
313 
314 static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
315 {
316  AVCodecInternal *avci = avctx->internal;
317  AVFrame *dst = avci->buffer_frame;
318  int ret;
319 
320  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
321  /* extract audio service type metadata */
322  AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
323  if (sd && sd->size >= sizeof(enum AVAudioServiceType))
324  avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
325 
326  /* check for valid frame size */
327  if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
328  if (src->nb_samples > avctx->frame_size) {
329  av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
330  return AVERROR(EINVAL);
331  }
332  } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
333  /* if we already got an undersized frame, that must have been the last */
334  if (avctx->internal->last_audio_frame) {
335  av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
336  return AVERROR(EINVAL);
337  }
338 
339  if (src->nb_samples < avctx->frame_size) {
340  ret = pad_last_frame(avctx, dst, src);
341  if (ret < 0)
342  return ret;
343 
344  avctx->internal->last_audio_frame = 1;
345  } else if (src->nb_samples > avctx->frame_size) {
346  av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
347  return AVERROR(EINVAL);
348  }
349  }
350  }
351 
352  if (!dst->data[0]) {
353  ret = av_frame_ref(dst, src);
354  if (ret < 0)
355  return ret;
356  }
357 
358  return 0;
359 }
360 
361 int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
362 {
363  AVCodecInternal *avci = avctx->internal;
364  int ret;
365 
366  if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
367  return AVERROR(EINVAL);
368 
369  if (avci->draining)
370  return AVERROR_EOF;
371 
372  if (avci->buffer_frame->data[0])
373  return AVERROR(EAGAIN);
374 
375  if (!frame) {
376  avci->draining = 1;
377  } else {
378  ret = encode_send_frame_internal(avctx, frame);
379  if (ret < 0)
380  return ret;
381  }
382 
383  if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
384  ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
385  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
386  return ret;
387  }
388 
389  return 0;
390 }
391 
392 int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
393 {
394  AVCodecInternal *avci = avctx->internal;
395  int ret;
396 
397  av_packet_unref(avpkt);
398 
399  if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
400  return AVERROR(EINVAL);
401 
402  if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
403  av_packet_move_ref(avpkt, avci->buffer_pkt);
404  } else {
405  ret = encode_receive_packet_internal(avctx, avpkt);
406  if (ret < 0)
407  return ret;
408  }
409 
410  return 0;
411 }
412 
413 int ff_encode_preinit(AVCodecContext *avctx)
414 {
415  int i;
416 
417  if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
418  av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
419  return AVERROR(EINVAL);
420  }
421 
422  if (avctx->codec->sample_fmts) {
423  for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
424  if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
425  break;
426  if (avctx->channels == 1 &&
427  av_get_planar_sample_fmt(avctx->sample_fmt) ==
428  av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
429  avctx->sample_fmt = avctx->codec->sample_fmts[i];
430  break;
431  }
432  }
433  if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
434  char buf[128];
435  snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
436  av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
437  (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
438  return AVERROR(EINVAL);
439  }
440  }
441  if (avctx->codec->pix_fmts) {
442  for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
443  if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
444  break;
445  if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
446  char buf[128];
447  snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
448  av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
449  (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
450  return AVERROR(EINVAL);
451  }
452  if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
453  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
454  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
455  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
456  avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
457  avctx->color_range = AVCOL_RANGE_JPEG;
458  }
459  if (avctx->codec->supported_samplerates) {
460  for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
461  if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
462  break;
463  if (avctx->codec->supported_samplerates[i] == 0) {
464  av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
465  avctx->sample_rate);
466  return AVERROR(EINVAL);
467  }
468  }
469  if (avctx->sample_rate < 0) {
470  av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
471  avctx->sample_rate);
472  return AVERROR(EINVAL);
473  }
474  if (avctx->codec->channel_layouts) {
475  if (!avctx->channel_layout) {
476  av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
477  } else {
478  for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
479  if (avctx->channel_layout == avctx->codec->channel_layouts[i])
480  break;
481  if (avctx->codec->channel_layouts[i] == 0) {
482  char buf[512];
483  av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
484  av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
485  return AVERROR(EINVAL);
486  }
487  }
488  }
489  if (avctx->channel_layout && avctx->channels) {
490  int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
491  if (channels != avctx->channels) {
492  char buf[512];
493  av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
494  av_log(avctx, AV_LOG_ERROR,
495  "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
496  buf, channels, avctx->channels);
497  return AVERROR(EINVAL);
498  }
499  } else if (avctx->channel_layout) {
500  avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
501  }
502  if (avctx->channels < 0) {
503  av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
504  avctx->channels);
505  return AVERROR(EINVAL);
506  }
507  if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
508  const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
509  if ( avctx->bits_per_raw_sample < 0
510  || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
511  av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
512  avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
513  avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
514  }
515  if (avctx->width <= 0 || avctx->height <= 0) {
516  av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
517  return AVERROR(EINVAL);
518  }
519  }
520  if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
521  && avctx->bit_rate>0 && avctx->bit_rate<1000) {
522  av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
523  }
524 
525  if (!avctx->rc_initial_buffer_occupancy)
526  avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
527 
528  if (avctx->ticks_per_frame && avctx->time_base.num &&
529  avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
530  av_log(avctx, AV_LOG_ERROR,
531  "ticks_per_frame %d too large for the timebase %d/%d.",
532  avctx->ticks_per_frame,
533  avctx->time_base.num,
534  avctx->time_base.den);
535  return AVERROR(EINVAL);
536  }
537 
538  if (avctx->hw_frames_ctx) {
539  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
540  if (frames_ctx->format != avctx->pix_fmt) {
541  av_log(avctx, AV_LOG_ERROR,
542  "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
543  return AVERROR(EINVAL);
544  }
545  if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
546  avctx->sw_pix_fmt != frames_ctx->sw_format) {
547  av_log(avctx, AV_LOG_ERROR,
548  "Mismatching AVCodecContext.sw_pix_fmt (%s) "
549  "and AVHWFramesContext.sw_format (%s)\n",
550  av_get_pix_fmt_name(avctx->sw_pix_fmt),
551  av_get_pix_fmt_name(frames_ctx->sw_format));
552  return AVERROR(EINVAL);
553  }
554  avctx->sw_pix_fmt = frames_ctx->sw_format;
555  }
556 
557  return 0;
558 }
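
The listing above implements the encoder half of the decoupled send/receive API. As a point of reference, a minimal sketch of how an application typically drives it is shown below; the encoder context is assumed to be opened already, write_packet() is a hypothetical consumer, and error handling is abbreviated.

/* Minimal encode-loop sketch (illustrative, not part of encode.c):
 * push one frame, then drain every packet the encoder has ready. */
static int encode_one(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt)
{
    /* frame == NULL switches the encoder into draining mode */
    int ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            /* needs more input, or fully drained */
        if (ret < 0)
            return ret;          /* genuine encoding error */

        write_packet(pkt);       /* hypothetical sink: mux or write the data */
        av_packet_unref(pkt);
    }
    return 0;
}

After the last real frame, calling encode_one(enc_ctx, NULL, pkt) flushes the delayed packets; avcodec_receive_packet() then returns AVERROR_EOF once the internal draining_done flag is set.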
AVSubtitle
Definition: avcodec.h:2283
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:143
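A hedged sketch of how the legacy subtitle path is typically called; preparing the AVSubtitle (whose start_display_time must be 0, as enforced above) and sizing the output buffer are assumed to happen elsewhere.

/* Illustrative only: encode one AVSubtitle into a caller-owned buffer. */
static int encode_one_subtitle(AVCodecContext *enc_ctx, const AVSubtitle *sub,
                               uint8_t *outbuf, int outbuf_size)
{
    int out_size = avcodec_encode_subtitle(enc_ctx, outbuf, outbuf_size, sub);
    return out_size; /* bytes written on success, negative error code on failure */
}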
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1008
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:404
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
avcodec_receive_packet
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:392
AVERROR
#define AVERROR(e)
Convert a POSIX error code into a negative FFmpeg error code.
Definition: error.h
AVCodecContext::audio_service_type
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:1053
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1039
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:246
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:988
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:617
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2540
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AVCodecInternal::es
EncodeSimpleContext es
Definition: internal.h:167
AVHWFramesContext::format
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
AVCodec::pix_fmts
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:218
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:217
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:216
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVCodecInternal::frame_thread_encoder
void * frame_thread_encoder
Definition: internal.h:165
encode.h
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:391
avcodec_is_open
int avcodec_is_open(AVCodecContext *s)
Definition: avcodec.c:715
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:477
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:404
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
EncodeSimpleContext::in_frame
AVFrame * in_frame
Definition: internal.h:127
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:388
fail
#define fail()
Definition: checkasm.h:136
AVCodec::sample_fmts
enum AVSampleFormat * sample_fmts
array of supported sample formats, or NULL if unknown, array is terminated by -1
Definition: codec.h:220
encode_receive_packet_internal
static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
Definition: encode.c:280
AVCodec::encode_sub
int(* encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size, const struct AVSubtitle *sub)
Definition: codec.h:279
samplefmt.h
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:459
AVRational::num
int num
Numerator.
Definition: rational.h:59
av_image_check_size2
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:289
av_get_planar_sample_fmt
enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt)
Get the planar alternative form of the given sample format.
Definition: samplefmt.c:84
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
avassert.h
ff_samples_to_time_base
static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx, int64_t samples)
Rescale from sample rate to AVCodecContext.time_base.
Definition: internal.h:240
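encode_simple_internal() above uses this helper to derive avpkt->duration from frame->nb_samples. Conceptually it rescales a sample count from the 1/sample_rate time base into avctx->time_base, roughly as in the sketch below (the actual implementation lives in internal.h).

/* Illustrative only: 1024 samples at 48 kHz with time_base = 1/48000 yields a duration of 1024. */
int64_t duration = av_rescale_q(nb_samples,
                                (AVRational){ 1, avctx->sample_rate },
                                avctx->time_base);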
AVCodec::supported_samplerates
const int * supported_samplerates
array of supported audio samplerates, or NULL if unknown, array is terminated by 0
Definition: codec.h:219
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
encode_send_frame_internal
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
Definition: encode.c:314
frame_thread_encoder.h
AVFrameSideData::size
size_t size
Definition: frame.h:212
encode_simple_internal
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
Definition: encode.c:172
AVCodecContext::rc_initial_buffer_occupancy
int rc_initial_buffer_occupancy
Number of bits which should be loaded into the rc buffer before decoding starts.
Definition: avcodec.h:1218
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecInternal::buffer_pkt
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:193
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:511
AV_FRAME_DATA_AUDIO_SERVICE_TYPE
@ AV_FRAME_DATA_AUDIO_SERVICE_TYPE
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1421
channels
channels
Definition: aptx.h:33
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1880
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_thread_video_encode_frame
int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame, int *got_packet_ptr)
Definition: frame_thread_encoder.c:268
if
if(ret)
Definition: filter_design.txt:179
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1175
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:356
NULL
#define NULL
Definition: coverity.c:32
AVHWFramesContext::sw_format
enum AVPixelFormat sw_format
The pixel format identifying the actual data layout of the hardware frames.
Definition: hwcontext.h:222
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:537
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:963
AVCodec::type
enum AVMediaType type
Definition: codec.h:210
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:414
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:429
src
#define src
Definition: vp8dsp.c:255
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:129
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:460
AVCodecInternal::draining_done
int draining_done
Definition: internal.h:195
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:226
AVAudioServiceType
AVAudioServiceType
Definition: defs.h:57
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:502
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1232
AVPacket::size
int size
Definition: packet.h:374
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:327
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:996
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
size
int size
Definition: twinvq_data.h:10344
AVCodec::encode2
int(* encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt, const struct AVFrame *frame, int *got_packet_ptr)
Encode data to an AVPacket.
Definition: codec.h:291
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVCodec::receive_packet
int(* receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt)
Encode API with decoupled frame/packet dataflow.
Definition: codec.h:313
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:162
frame.h
EncodeSimpleContext
Definition: internal.h:126
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
attributes.h
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:466
AVCodecInternal
Definition: internal.h:130
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:163
ff_encode_preinit
int ff_encode_preinit(AVCodecContext *avctx)
Definition: encode.c:413
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1447
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:989
av_codec_is_encoder
int av_codec_is_encoder(const AVCodec *codec)
Definition: utils.c:76
av_samples_copy
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:220
i
int i
Definition: input.c:406
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
internal.h
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:461
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:437
AVCodecContext::height
int height
Definition: avcodec.h:552
avcodec_send_frame
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:361
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:589
AVCodecInternal::last_audio_frame
int last_audio_frame
An audio frame with less than required samples has been submitted and padded with silence.
Definition: internal.h:143
AVCodecContext::hw_frames_ctx
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
Definition: avcodec.h:1853
av_samples_set_silence
int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Fill an audio buffer with silence.
Definition: samplefmt.c:244
avcodec.h
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
ret
ret
Definition: filter_design.txt:187
frame
the raw AVFrame (audio or video) being submitted to or buffered by the encoder
AVPacket::side_data
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:384
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AVCodecContext
main external API structure.
Definition: avcodec.h:379
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1455
channel_layout.h
ff_get_encode_buffer
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
Definition: encode.c:78
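A hedged sketch of how an encoder implementing its own receive_packet callback might use this helper to obtain a reference-counted, padded output buffer; the encoder name and the payload size are hypothetical.

/* Hypothetical receive_packet callback built on ff_get_encode_buffer(). */
static int myenc_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int64_t payload_size = 4096; /* placeholder: worst-case size of this packet */
    int ret = ff_get_encode_buffer(avctx, avpkt, payload_size, 0);
    if (ret < 0)
        return ret;
    /* write the bitstream into avpkt->data and set avpkt->pts/dts/flags here */
    return 0;
}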
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCodecContext::get_encode_buffer
int(* get_encode_buffer)(struct AVCodecContext *s, AVPacket *pkt, int flags)
This callback is called at the beginning of each packet to get a data buffer for it.
Definition: avcodec.h:2020
encode_simple_receive_packet
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: encode.c:267
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:77
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:169
AVCodecInternal::buffer_frame
AVFrame * buffer_frame
Definition: internal.h:194
pad_last_frame
static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
Pad last frame with silence.
Definition: encode.c:112
AVCodecInternal::draining
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:188
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:387
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_encode_get_frame
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
Definition: encode.c:157
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1019
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
AVPacket
This structure stores compressed data.
Definition: packet.h:350
avcodec_default_get_encode_buffer
int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
The default callback for AVCodecContext.get_encode_buffer().
Definition: encode.c:56
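Applications can replace this default through AVCodecContext.get_encode_buffer. A minimal custom callback might look like the sketch below, assuming a plain heap allocation is sufficient; note that avpkt->size has already been set when the callback runs, and the padding bytes are zeroed by ff_get_encode_buffer() afterwards.

/* Hypothetical custom get_encode_buffer callback backed by av_buffer_alloc(). */
static int my_get_encode_buffer(AVCodecContext *avctx, AVPacket *pkt, int flags)
{
    pkt->buf = av_buffer_alloc(pkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!pkt->buf)
        return AVERROR(ENOMEM);
    pkt->data = pkt->buf->data;
    return 0;
}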
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:552
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::sw_pix_fmt
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1713
AV_CODEC_CAP_SMALL_LAST_FRAME
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
Definition: codec.h:82
snprintf
#define snprintf
Definition: snprintf.h:34
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:34
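For encoders built on the encode2 callback, usage typically follows the pattern sketched below; the encoder and its worst-case size bound are hypothetical, and avpkt->size is trimmed to the bytes actually written before returning.

/* Hypothetical encode2 callback: reserve a worst-case buffer from the shared byte_buffer. */
static int myenc_encode2(AVCodecContext *avctx, AVPacket *avpkt,
                         const AVFrame *frame, int *got_packet_ptr)
{
    int64_t max_size = (int64_t)avctx->width * avctx->height * 4 + 1024; /* example bound */
    int ret = ff_alloc_packet(avctx, avpkt, max_size);
    if (ret < 0)
        return ret;
    /* ... encode frame into avpkt->data, then shrink avpkt->size to the bytes written ... */
    *got_packet_ptr = 1;
    return 0;
}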
AVCodec::channel_layouts
const uint64_t * channel_layouts
array of supported channel layouts, or NULL if unknown; array is terminated by 0
Definition: codec.h:221
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2460
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:231