FFmpeg
ffmpeg.c
Go to the documentation of this file.
/*
 * Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * multimedia converter based on the FFmpeg libraries
 */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* File that per-frame video statistics are appended to (-vstats/-vstats_file). */
static FILE *vstats_file;

/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* saved terminal state, restored on exit */
static int restore_tty;         /* nonzero once oldtty holds a valid state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
170 
/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  /* If we are initializing the system, utilize current heartbeat
258  PTS as the start time, and show until the following subpicture
259  is received. Otherwise, utilize the previous subpicture's end time
260  as the fall-back value. */
261  pts = ist->sub2video.initialize ?
262  heartbeat_pts : ist->sub2video.end_pts;
263  end_pts = INT64_MAX;
264  num_rects = 0;
265  }
266  if (sub2video_get_blank_frame(ist) < 0) {
268  "Impossible to get a blank canvas.\n");
269  return;
270  }
271  dst = frame->data [0];
272  dst_linesize = frame->linesize[0];
273  for (i = 0; i < num_rects; i++)
274  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275  sub2video_push_ref(ist, pts);
276  ist->sub2video.end_pts = end_pts;
277  ist->sub2video.initialize = 0;
278 }
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
/* end of sub2video hack */
327 
/* Restore the saved terminal attributes. Kept minimal (tcsetattr only) so it
   is safe to call from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335 
336 void term_exit(void)
337 {
338  av_log(NULL, AV_LOG_QUIET, "%s", "");
340 }
341 
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
/* set once transcode initialization completes; read by the interrupt callback
   and by ffmpeg_cleanup() */
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
347 
348 static void
350 {
351  int ret;
352  received_sigterm = sig;
355  if(received_nb_signals > 3) {
356  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357  strlen("Received > 3 system signals, hard exiting\n"));
358  if (ret < 0) { /* Do nothing */ };
359  exit(123);
360  }
361 }
362 
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: maps console events onto the POSIX-style
 * sigterm_handler() used on other platforms.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
394 
395 void term_init(void)
396 {
397 #if HAVE_TERMIOS_H
399  struct termios tty;
400  if (tcgetattr (0, &tty) == 0) {
401  oldtty = tty;
402  restore_tty = 1;
403 
404  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405  |INLCR|IGNCR|ICRNL|IXON);
406  tty.c_oflag |= OPOST;
407  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408  tty.c_cflag &= ~(CSIZE|PARENB);
409  tty.c_cflag |= CS8;
410  tty.c_cc[VMIN] = 1;
411  tty.c_cc[VTIME] = 0;
412 
413  tcsetattr (0, TCSANOW, &tty);
414  }
415  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
416  }
417 #endif
418 
419  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
421 #ifdef SIGXCPU
422  signal(SIGXCPU, sigterm_handler);
423 #endif
424 #ifdef SIGPIPE
425  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
426 #endif
427 #if HAVE_SETCONSOLECTRLHANDLER
428  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
429 #endif
430 }
431 
/* read a key without blocking */
/**
 * @return the key pressed, or a negative value / 0 when no key is available.
 */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* poll stdin with a zero timeout so we never block */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
483 
484 static int decode_interrupt_cb(void *ctx)
485 {
487 }
488 
490 
491 static void ffmpeg_cleanup(int ret)
492 {
493  int i, j;
494 
495  if (do_benchmark) {
496  int maxrss = getmaxrss() / 1024;
497  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
498  }
499 
500  for (i = 0; i < nb_filtergraphs; i++) {
501  FilterGraph *fg = filtergraphs[i];
503  for (j = 0; j < fg->nb_inputs; j++) {
504  InputFilter *ifilter = fg->inputs[j];
505  struct InputStream *ist = ifilter->ist;
506 
507  while (av_fifo_size(ifilter->frame_queue)) {
508  AVFrame *frame;
510  sizeof(frame), NULL);
512  }
513  av_fifo_freep(&ifilter->frame_queue);
514  if (ist->sub2video.sub_queue) {
515  while (av_fifo_size(ist->sub2video.sub_queue)) {
516  AVSubtitle sub;
518  &sub, sizeof(sub), NULL);
519  avsubtitle_free(&sub);
520  }
522  }
523  av_buffer_unref(&ifilter->hw_frames_ctx);
524  av_freep(&ifilter->name);
525  av_freep(&fg->inputs[j]);
526  }
527  av_freep(&fg->inputs);
528  for (j = 0; j < fg->nb_outputs; j++) {
529  OutputFilter *ofilter = fg->outputs[j];
530 
531  avfilter_inout_free(&ofilter->out_tmp);
532  av_freep(&ofilter->name);
533  av_freep(&ofilter->formats);
534  av_freep(&ofilter->channel_layouts);
535  av_freep(&ofilter->sample_rates);
536  av_freep(&fg->outputs[j]);
537  }
538  av_freep(&fg->outputs);
539  av_freep(&fg->graph_desc);
540 
542  }
544 
546 
547  /* close files */
548  for (i = 0; i < nb_output_files; i++) {
549  OutputFile *of = output_files[i];
551  if (!of)
552  continue;
553  s = of->ctx;
554  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
555  avio_closep(&s->pb);
557  av_dict_free(&of->opts);
558 
560  }
561  for (i = 0; i < nb_output_streams; i++) {
563 
564  if (!ost)
565  continue;
566 
567  av_bsf_free(&ost->bsf_ctx);
568 
569  av_frame_free(&ost->filtered_frame);
570  av_frame_free(&ost->last_frame);
571  av_dict_free(&ost->encoder_opts);
572 
573  av_freep(&ost->forced_keyframes);
574  av_expr_free(ost->forced_keyframes_pexpr);
575  av_freep(&ost->avfilter);
576  av_freep(&ost->logfile_prefix);
577 
578  av_freep(&ost->audio_channels_map);
579  ost->audio_channels_mapped = 0;
580 
581  av_dict_free(&ost->sws_dict);
582  av_dict_free(&ost->swr_opts);
583 
584  avcodec_free_context(&ost->enc_ctx);
585  avcodec_parameters_free(&ost->ref_par);
586 
587  if (ost->muxing_queue) {
588  while (av_fifo_size(ost->muxing_queue)) {
589  AVPacket pkt;
590  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
592  }
593  av_fifo_freep(&ost->muxing_queue);
594  }
595 
597  }
598 #if HAVE_THREADS
599  free_input_threads();
600 #endif
601  for (i = 0; i < nb_input_files; i++) {
604  }
605  for (i = 0; i < nb_input_streams; i++) {
606  InputStream *ist = input_streams[i];
607 
610  av_dict_free(&ist->decoder_opts);
613  av_freep(&ist->filters);
614  av_freep(&ist->hwaccel_device);
615  av_freep(&ist->dts_buffer);
616 
618 
620  }
621 
622  if (vstats_file) {
623  if (fclose(vstats_file))
625  "Error closing vstats file, loss of information possible: %s\n",
626  av_err2str(AVERROR(errno)));
627  }
629 
634 
635  uninit_opts();
636 
638 
639  if (received_sigterm) {
640  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
641  (int) received_sigterm);
642  } else if (ret && atomic_load(&transcode_init_done)) {
643  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
644  }
645  term_exit();
646  ffmpeg_exited = 1;
647 }
648 
650 {
651  AVDictionaryEntry *t = NULL;
652 
653  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
655  }
656 }
657 
659 {
661  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
662  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
663  exit_program(1);
664  }
665 }
666 
667 static void abort_codec_experimental(AVCodec *c, int encoder)
668 {
669  exit_program(1);
670 }
671 
672 static void update_benchmark(const char *fmt, ...)
673 {
674  if (do_benchmark_all) {
676  va_list va;
677  char buf[1024];
678 
679  if (fmt) {
680  va_start(va, fmt);
681  vsnprintf(buf, sizeof(buf), fmt, va);
682  va_end(va);
684  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
687  t.real_usec - current_time.real_usec, buf);
688  }
689  current_time = t;
690  }
691 }
692 
694 {
695  int i;
696  for (i = 0; i < nb_output_streams; i++) {
697  OutputStream *ost2 = output_streams[i];
698  ost2->finished |= ost == ost2 ? this_stream : others;
699  }
700 }
701 
702 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
703 {
704  AVFormatContext *s = of->ctx;
705  AVStream *st = ost->st;
706  int ret;
707 
708  /*
709  * Audio encoders may split the packets -- #frames in != #packets out.
710  * But there is no reordering, so we can limit the number of output packets
711  * by simply dropping them here.
712  * Counting encoded video frames needs to be done separately because of
713  * reordering, see do_video_out().
714  * Do not count the packet when unqueued because it has been counted when queued.
715  */
716  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
717  if (ost->frame_number >= ost->max_frames) {
719  return;
720  }
721  ost->frame_number++;
722  }
723 
724  if (!of->header_written) {
725  AVPacket tmp_pkt = {0};
726  /* the muxer is not initialized yet, buffer the packet */
727  if (!av_fifo_space(ost->muxing_queue)) {
728  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
729  ost->max_muxing_queue_size);
730  if (new_size <= av_fifo_size(ost->muxing_queue)) {
732  "Too many packets buffered for output stream %d:%d.\n",
733  ost->file_index, ost->st->index);
734  exit_program(1);
735  }
736  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
737  if (ret < 0)
738  exit_program(1);
739  }
741  if (ret < 0)
742  exit_program(1);
743  av_packet_move_ref(&tmp_pkt, pkt);
744  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
745  return;
746  }
747 
750  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
751 
753  int i;
755  NULL);
756  ost->quality = sd ? AV_RL32(sd) : -1;
757  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
758 
759  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
760  if (sd && i < sd[5])
761  ost->error[i] = AV_RL64(sd + 8 + 8*i);
762  else
763  ost->error[i] = -1;
764  }
765 
766  if (ost->frame_rate.num && ost->is_cfr) {
767  if (pkt->duration > 0)
768  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
769  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
770  ost->mux_timebase);
771  }
772  }
773 
774  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
775 
776  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
777  if (pkt->dts != AV_NOPTS_VALUE &&
778  pkt->pts != AV_NOPTS_VALUE &&
779  pkt->dts > pkt->pts) {
780  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
781  pkt->dts, pkt->pts,
782  ost->file_index, ost->st->index);
783  pkt->pts =
784  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
785  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
786  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
787  }
789  pkt->dts != AV_NOPTS_VALUE &&
790  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
791  ost->last_mux_dts != AV_NOPTS_VALUE) {
792  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
793  if (pkt->dts < max) {
794  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
795  if (exit_on_error)
796  loglevel = AV_LOG_ERROR;
797  av_log(s, loglevel, "Non-monotonous DTS in output stream "
798  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
799  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
800  if (exit_on_error) {
801  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
802  exit_program(1);
803  }
804  av_log(s, loglevel, "changing to %"PRId64". This may result "
805  "in incorrect timestamps in the output file.\n",
806  max);
807  if (pkt->pts >= pkt->dts)
808  pkt->pts = FFMAX(pkt->pts, max);
809  pkt->dts = max;
810  }
811  }
812  }
813  ost->last_mux_dts = pkt->dts;
814 
815  ost->data_size += pkt->size;
816  ost->packets_written++;
817 
819 
820  if (debug_ts) {
821  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
822  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
823  av_get_media_type_string(ost->enc_ctx->codec_type),
826  pkt->size
827  );
828  }
829 
831  if (ret < 0) {
832  print_error("av_interleaved_write_frame()", ret);
833  main_return_code = 1;
835  }
837 }
838 
840 {
841  OutputFile *of = output_files[ost->file_index];
842 
843  ost->finished |= ENCODER_FINISHED;
844  if (of->shortest) {
845  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
847  }
848 }
849 
850 /*
851  * Send a single packet to the output, applying any bitstream filters
852  * associated with the output stream. This may result in any number
853  * of packets actually being written, depending on what bitstream
854  * filters are applied. The supplied packet is consumed and will be
855  * blank (as if newly-allocated) when this function returns.
856  *
857  * If eof is set, instead indicate EOF to all bitstream filters and
858  * therefore flush any delayed packets to the output. A blank packet
859  * must be supplied in this case.
860  */
862  OutputStream *ost, int eof)
863 {
864  int ret = 0;
865 
866  /* apply the output bitstream filters */
867  if (ost->bsf_ctx) {
868  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
869  if (ret < 0)
870  goto finish;
871  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
872  write_packet(of, pkt, ost, 0);
873  if (ret == AVERROR(EAGAIN))
874  ret = 0;
875  } else if (!eof)
876  write_packet(of, pkt, ost, 0);
877 
878 finish:
879  if (ret < 0 && ret != AVERROR_EOF) {
880  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
882  if(exit_on_error)
883  exit_program(1);
884  }
885 }
886 
888 {
889  OutputFile *of = output_files[ost->file_index];
890 
891  if (of->recording_time != INT64_MAX &&
892  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
893  AV_TIME_BASE_Q) >= 0) {
895  return 0;
896  }
897  return 1;
898 }
899 
901  AVFrame *frame)
902 {
903  AVCodecContext *enc = ost->enc_ctx;
904  AVPacket pkt;
905  int ret;
906 
908  pkt.data = NULL;
909  pkt.size = 0;
910 
912  return;
913 
914  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
915  frame->pts = ost->sync_opts;
916  ost->sync_opts = frame->pts + frame->nb_samples;
917  ost->samples_encoded += frame->nb_samples;
918  ost->frames_encoded++;
919 
920  av_assert0(pkt.size || !pkt.data);
922  if (debug_ts) {
923  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
924  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
925  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926  enc->time_base.num, enc->time_base.den);
927  }
928 
929  ret = avcodec_send_frame(enc, frame);
930  if (ret < 0)
931  goto error;
932 
933  while (1) {
934  ret = avcodec_receive_packet(enc, &pkt);
935  if (ret == AVERROR(EAGAIN))
936  break;
937  if (ret < 0)
938  goto error;
939 
940  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
941 
942  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
943 
944  if (debug_ts) {
945  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
946  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
949  }
950 
951  output_packet(of, &pkt, ost, 0);
952  }
953 
954  return;
955 error:
956  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
957  exit_program(1);
958 }
959 
960 static void do_subtitle_out(OutputFile *of,
961  OutputStream *ost,
962  AVSubtitle *sub)
963 {
964  int subtitle_out_max_size = 1024 * 1024;
965  int subtitle_out_size, nb, i;
966  AVCodecContext *enc;
967  AVPacket pkt;
968  int64_t pts;
969 
970  if (sub->pts == AV_NOPTS_VALUE) {
971  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972  if (exit_on_error)
973  exit_program(1);
974  return;
975  }
976 
977  enc = ost->enc_ctx;
978 
979  if (!subtitle_out) {
980  subtitle_out = av_malloc(subtitle_out_max_size);
981  if (!subtitle_out) {
982  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
983  exit_program(1);
984  }
985  }
986 
987  /* Note: DVB subtitle need one packet to draw them and one other
988  packet to clear them */
989  /* XXX: signal it in the codec context ? */
991  nb = 2;
992  else
993  nb = 1;
994 
995  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
996  pts = sub->pts;
997  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
998  pts -= output_files[ost->file_index]->start_time;
999  for (i = 0; i < nb; i++) {
1000  unsigned save_num_rects = sub->num_rects;
1001 
1002  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1003  if (!check_recording_time(ost))
1004  return;
1005 
1006  sub->pts = pts;
1007  // start_display_time is required to be 0
1008  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1009  sub->end_display_time -= sub->start_display_time;
1010  sub->start_display_time = 0;
1011  if (i == 1)
1012  sub->num_rects = 0;
1013 
1014  ost->frames_encoded++;
1015 
1016  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1017  subtitle_out_max_size, sub);
1018  if (i == 1)
1019  sub->num_rects = save_num_rects;
1020  if (subtitle_out_size < 0) {
1021  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1022  exit_program(1);
1023  }
1024 
1025  av_init_packet(&pkt);
1026  pkt.data = subtitle_out;
1027  pkt.size = subtitle_out_size;
1028  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1029  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1031  /* XXX: the pts correction is handled here. Maybe handling
1032  it in the codec would be better */
1033  if (i == 0)
1034  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1035  else
1036  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1037  }
1038  pkt.dts = pkt.pts;
1039  output_packet(of, &pkt, ost, 0);
1040  }
1041 }
1042 
1043 static void do_video_out(OutputFile *of,
1044  OutputStream *ost,
1045  AVFrame *next_picture,
1046  double sync_ipts)
1047 {
1048  int ret, format_video_sync;
1049  AVPacket pkt;
1050  AVCodecContext *enc = ost->enc_ctx;
1051  AVCodecParameters *mux_par = ost->st->codecpar;
1052  AVRational frame_rate;
1053  int nb_frames, nb0_frames, i;
1054  double delta, delta0;
1055  double duration = 0;
1056  int frame_size = 0;
1057  InputStream *ist = NULL;
1058  AVFilterContext *filter = ost->filter->filter;
1059 
1060  if (ost->source_index >= 0)
1061  ist = input_streams[ost->source_index];
1062 
1063  frame_rate = av_buffersink_get_frame_rate(filter);
1064  if (frame_rate.num > 0 && frame_rate.den > 0)
1065  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1066 
1067  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1068  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1069 
1070  if (!ost->filters_script &&
1071  !ost->filters &&
1072  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1073  next_picture &&
1074  ist &&
1075  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1076  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1077  }
1078 
1079  if (!next_picture) {
1080  //end, flushing
1081  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1082  ost->last_nb0_frames[1],
1083  ost->last_nb0_frames[2]);
1084  } else {
1085  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1086  delta = delta0 + duration;
1087 
1088  /* by default, we output a single frame */
1089  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1090  nb_frames = 1;
1091 
1092  format_video_sync = video_sync_method;
1093  if (format_video_sync == VSYNC_AUTO) {
1094  if(!strcmp(of->ctx->oformat->name, "avi")) {
1095  format_video_sync = VSYNC_VFR;
1096  } else
1097  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1098  if ( ist
1099  && format_video_sync == VSYNC_CFR
1100  && input_files[ist->file_index]->ctx->nb_streams == 1
1101  && input_files[ist->file_index]->input_ts_offset == 0) {
1102  format_video_sync = VSYNC_VSCFR;
1103  }
1104  if (format_video_sync == VSYNC_CFR && copy_ts) {
1105  format_video_sync = VSYNC_VSCFR;
1106  }
1107  }
1108  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1109 
1110  if (delta0 < 0 &&
1111  delta > 0 &&
1112  format_video_sync != VSYNC_PASSTHROUGH &&
1113  format_video_sync != VSYNC_DROP) {
1114  if (delta0 < -0.6) {
1115  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1116  } else
1117  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1118  sync_ipts = ost->sync_opts;
1119  duration += delta0;
1120  delta0 = 0;
1121  }
1122 
1123  switch (format_video_sync) {
1124  case VSYNC_VSCFR:
1125  if (ost->frame_number == 0 && delta0 >= 0.5) {
1126  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1127  delta = duration;
1128  delta0 = 0;
1129  ost->sync_opts = llrint(sync_ipts);
1130  }
1131  case VSYNC_CFR:
1132  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1133  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1134  nb_frames = 0;
1135  } else if (delta < -1.1)
1136  nb_frames = 0;
1137  else if (delta > 1.1) {
1138  nb_frames = lrintf(delta);
1139  if (delta0 > 1.1)
1140  nb0_frames = llrintf(delta0 - 0.6);
1141  }
1142  break;
1143  case VSYNC_VFR:
1144  if (delta <= -0.6)
1145  nb_frames = 0;
1146  else if (delta > 0.6)
1147  ost->sync_opts = llrint(sync_ipts);
1148  break;
1149  case VSYNC_DROP:
1150  case VSYNC_PASSTHROUGH:
1151  ost->sync_opts = llrint(sync_ipts);
1152  break;
1153  default:
1154  av_assert0(0);
1155  }
1156  }
1157 
1158  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1159  nb0_frames = FFMIN(nb0_frames, nb_frames);
1160 
1161  memmove(ost->last_nb0_frames + 1,
1162  ost->last_nb0_frames,
1163  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1164  ost->last_nb0_frames[0] = nb0_frames;
1165 
1166  if (nb0_frames == 0 && ost->last_dropped) {
1167  nb_frames_drop++;
1169  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1170  ost->frame_number, ost->st->index, ost->last_frame->pts);
1171  }
1172  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1173  if (nb_frames > dts_error_threshold * 30) {
1174  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1175  nb_frames_drop++;
1176  return;
1177  }
1178  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1179  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1180  if (nb_frames_dup > dup_warning) {
1181  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1182  dup_warning *= 10;
1183  }
1184  }
1185  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1186 
1187  /* duplicates frame if needed */
1188  for (i = 0; i < nb_frames; i++) {
1189  AVFrame *in_picture;
1190  int forced_keyframe = 0;
1191  double pts_time;
1192  av_init_packet(&pkt);
1193  pkt.data = NULL;
1194  pkt.size = 0;
1195 
1196  if (i < nb0_frames && ost->last_frame) {
1197  in_picture = ost->last_frame;
1198  } else
1199  in_picture = next_picture;
1200 
1201  if (!in_picture)
1202  return;
1203 
1204  in_picture->pts = ost->sync_opts;
1205 
1206  if (!check_recording_time(ost))
1207  return;
1208 
1210  ost->top_field_first >= 0)
1211  in_picture->top_field_first = !!ost->top_field_first;
1212 
1213  if (in_picture->interlaced_frame) {
1214  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1215  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1216  else
1217  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1218  } else
1219  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1220 
1221  in_picture->quality = enc->global_quality;
1222  in_picture->pict_type = 0;
1223 
1224  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1225  in_picture->pts != AV_NOPTS_VALUE)
1226  ost->forced_kf_ref_pts = in_picture->pts;
1227 
1228  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1229  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1230  if (ost->forced_kf_index < ost->forced_kf_count &&
1231  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1232  ost->forced_kf_index++;
1233  forced_keyframe = 1;
1234  } else if (ost->forced_keyframes_pexpr) {
1235  double res;
1236  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1237  res = av_expr_eval(ost->forced_keyframes_pexpr,
1238  ost->forced_keyframes_expr_const_values, NULL);
1239  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1240  ost->forced_keyframes_expr_const_values[FKF_N],
1241  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1242  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1243  ost->forced_keyframes_expr_const_values[FKF_T],
1244  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1245  res);
1246  if (res) {
1247  forced_keyframe = 1;
1248  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1249  ost->forced_keyframes_expr_const_values[FKF_N];
1250  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1251  ost->forced_keyframes_expr_const_values[FKF_T];
1252  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1253  }
1254 
1255  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1256  } else if ( ost->forced_keyframes
1257  && !strncmp(ost->forced_keyframes, "source", 6)
1258  && in_picture->key_frame==1
1259  && !i) {
1260  forced_keyframe = 1;
1261  }
1262 
1263  if (forced_keyframe) {
1264  in_picture->pict_type = AV_PICTURE_TYPE_I;
1265  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1266  }
1267 
1269  if (debug_ts) {
1270  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1271  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1272  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1273  enc->time_base.num, enc->time_base.den);
1274  }
1275 
1276  ost->frames_encoded++;
1277 
1278  ret = avcodec_send_frame(enc, in_picture);
1279  if (ret < 0)
1280  goto error;
1281  // Make sure Closed Captions will not be duplicated
1283 
1284  while (1) {
1285  ret = avcodec_receive_packet(enc, &pkt);
1286  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287  if (ret == AVERROR(EAGAIN))
1288  break;
1289  if (ret < 0)
1290  goto error;
1291 
1292  if (debug_ts) {
1293  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297  }
1298 
1300  pkt.pts = ost->sync_opts;
1301 
1302  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1303 
1304  if (debug_ts) {
1305  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1309  }
1310 
1311  frame_size = pkt.size;
1312  output_packet(of, &pkt, ost, 0);
1313 
1314  /* if two pass, output log */
1315  if (ost->logfile && enc->stats_out) {
1316  fprintf(ost->logfile, "%s", enc->stats_out);
1317  }
1318  }
1319  ost->sync_opts++;
1320  /*
1321  * For video, number of frames in == number of packets out.
1322  * But there may be reordering, so we can't throw away frames on encoder
1323  * flush, we need to limit them here, before they go into encoder.
1324  */
1325  ost->frame_number++;
1326 
1327  if (vstats_filename && frame_size)
1329  }
1330 
1331  if (!ost->last_frame)
1332  ost->last_frame = av_frame_alloc();
1333  av_frame_unref(ost->last_frame);
1334  if (next_picture && ost->last_frame)
1335  av_frame_ref(ost->last_frame, next_picture);
1336  else
1337  av_frame_free(&ost->last_frame);
1338 
1339  return;
1340 error:
1341  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1342  exit_program(1);
1343 }
1344 
1345 static double psnr(double d)
1346 {
1347  return -10.0 * log10(d);
1348 }
1349 
/* Append one line of per-frame statistics (the -vstats / -vstats_file
 * feature) for a video output stream: frame number, quantizer, optional
 * PSNR, frame size, elapsed time and bitrates.
 * NOTE(review): the signature line is missing from this excerpt; the
 * body uses `ost` (OutputStream *) and `frame_size` — confirm against
 * the full source.  Gaps in the embedded numbering mark lines lost in
 * extraction. */
1351 {
1352  AVCodecContext *enc;
1353  int frame_number;
1354  double ti1, bitrate, avg_bitrate;
1355 
1356  /* this is executed just the first time do_video_stats is called */
1357  if (!vstats_file) {
1358  vstats_file = fopen(vstats_filename, "w");
1359  if (!vstats_file) {
1360  perror("fopen");
1361  exit_program(1);
1362  }
1363  }
1364 
1365  enc = ost->enc_ctx;
1366  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1367  frame_number = ost->st->nb_frames;
     /* vstats_version >= 2 additionally prints the output file and
        stream indices in front of the frame data */
1368  if (vstats_version <= 1) {
1369  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1370  ost->quality / (float)FF_QP2LAMBDA);
1371  } else {
1372  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1373  ost->quality / (float)FF_QP2LAMBDA);
1374  }
1375 
1376  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1377  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1378 
1379  fprintf(vstats_file,"f_size= %6d ", frame_size);
1380  /* compute pts value */
1381  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
     /* clamp so the average-bitrate division below cannot blow up */
1382  if (ti1 < 0.01)
1383  ti1 = 0.01;
1384 
1385  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1386  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1387  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1388  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1389  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1390  }
1391 }
1392 
1393 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1394 
/* Mark an output stream as completely finished for both the encoder
 * and the muxer side.
 * NOTE(review): the signature line is missing from this excerpt; the
 * body operates on `ost` (OutputStream *) — confirm against the full
 * source.  The body of the for-loop (embedded line 1404) is also
 * missing here. */
1396 {
1397  OutputFile *of = output_files[ost->file_index];
1398  int i;
1399 
1400  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1401 
     /* with -shortest, the loop below visits every stream of this
        output file once one stream has ended */
1402  if (of->shortest) {
1403  for (i = 0; i < of->ctx->nb_streams; i++)
1405  }
1406 }
1407 
1408 /**
1409  * Get and encode new output from any of the filtergraphs, without causing
1410  * activity.
1411  *
1412  * @return 0 for success, <0 for severe errors
1413  */
1414 static int reap_filters(int flush)
1415 {
     /* NOTE(review): several source lines are missing from this excerpt
      * (the embedded numbering is non-contiguous); the comments below
      * only describe what is visible. */
1416  AVFrame *filtered_frame = NULL;
1417  int i;
1418 
1419  /* Reap all buffers present in the buffer sinks */
1420  for (i = 0; i < nb_output_streams; i++) {
1422  OutputFile *of = output_files[ost->file_index];
1424  AVCodecContext *enc = ost->enc_ctx;
1425  int ret = 0;
1426 
     /* nothing to reap until this stream's filtergraph is configured */
1427  if (!ost->filter || !ost->filter->graph->graph)
1428  continue;
1429  filter = ost->filter->filter;
1430 
     /* lazily initialize the output stream the first time data arrives;
        failure here is fatal */
1431  if (!ost->initialized) {
1432  char error[1024] = "";
1433  ret = init_output_stream(ost, error, sizeof(error));
1434  if (ret < 0) {
1435  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1436  ost->file_index, ost->index, error);
1437  exit_program(1);
1438  }
1439  }
1440 
     /* reuse a per-stream frame; allocated once on first use */
1441  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1442  return AVERROR(ENOMEM);
1443  }
1444  filtered_frame = ost->filtered_frame;
1445 
     /* drain every frame currently buffered in this stream's sink */
1446  while (1) {
1447  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1448  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1450  if (ret < 0) {
1451  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1453  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1454  } else if (flush && ret == AVERROR_EOF) {
1457  }
1458  break;
1459  }
     /* stream already finished: discard whatever the graph still emits */
1460  if (ost->finished) {
1461  av_frame_unref(filtered_frame);
1462  continue;
1463  }
1464  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1465  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1467  AVRational tb = enc->time_base;
     /* widen the timebase denominator so float_pts keeps extra
        fractional precision beyond the integer pts */
1468  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1469 
1470  tb.den <<= extra_bits;
1471  float_pts =
1472  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1474  float_pts /= 1 << extra_bits;
1475  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1476  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1477 
1478  filtered_frame->pts =
1479  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1481  }
1482 
     /* dispatch the frame to the media-type-specific encoder path */
1483  switch (av_buffersink_get_type(filter)) {
1484  case AVMEDIA_TYPE_VIDEO:
1485  if (!ost->frame_aspect_ratio.num)
1486  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1487 
1488  if (debug_ts) {
1489  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1490  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1491  float_pts,
1492  enc->time_base.num, enc->time_base.den);
1493  }
1494 
1495  do_video_out(of, ost, filtered_frame, float_pts);
1496  break;
1497  case AVMEDIA_TYPE_AUDIO:
1498  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1499  enc->channels != filtered_frame->channels) {
1501  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1502  break;
1503  }
1504  do_audio_out(of, ost, filtered_frame);
1505  break;
1506  default:
1507  // TODO support subtitle filters
1508  av_assert0(0);
1509  }
1510 
1511  av_frame_unref(filtered_frame);
1512  }
1513  }
1514 
1515  return 0;
1516 }
1517 
/* Print the end-of-run summary: per-media-type output sizes and muxing
 * overhead at INFO level, then detailed per-stream packet/frame counts
 * at VERBOSE level, and finally a warning if nothing was encoded.
 * NOTE(review): a few source lines are missing from this excerpt (the
 * embedded numbering is non-contiguous, e.g. 1528, 1538, 1600). */
1518 static void print_final_stats(int64_t total_size)
1519 {
1520  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1521  uint64_t subtitle_size = 0;
1522  uint64_t data_size = 0;
     /* percent stays negative when the overhead cannot be computed */
1523  float percent = -1.0;
1524  int i, j;
1525  int pass1_used = 1;
1526 
     /* accumulate written bytes per media type across all output streams */
1527  for (i = 0; i < nb_output_streams; i++) {
1529  switch (ost->enc_ctx->codec_type) {
1530  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1531  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1532  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1533  default: other_size += ost->data_size; break;
1534  }
1535  extra_size += ost->enc_ctx->extradata_size;
1536  data_size += ost->data_size;
1537  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1539  pass1_used = 0;
1540  }
1541 
     /* muxing overhead = container bytes beyond the raw stream payload */
1542  if (data_size && total_size>0 && total_size >= data_size)
1543  percent = 100.0 * (total_size - data_size) / data_size;
1544 
1545  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1546  video_size / 1024.0,
1547  audio_size / 1024.0,
1548  subtitle_size / 1024.0,
1549  other_size / 1024.0,
1550  extra_size / 1024.0);
1551  if (percent >= 0.0)
1552  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1553  else
1554  av_log(NULL, AV_LOG_INFO, "unknown");
1555  av_log(NULL, AV_LOG_INFO, "\n");
1556 
1557  /* print verbose per-stream stats */
1558  for (i = 0; i < nb_input_files; i++) {
1559  InputFile *f = input_files[i];
1560  uint64_t total_packets = 0, total_size = 0;
1561 
1562  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1563  i, f->ctx->url);
1564 
1565  for (j = 0; j < f->nb_streams; j++) {
1566  InputStream *ist = input_streams[f->ist_index + j];
1567  enum AVMediaType type = ist->dec_ctx->codec_type;
1568 
1569  total_size += ist->data_size;
1570  total_packets += ist->nb_packets;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1573  i, j, media_type_string(type));
1574  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1575  ist->nb_packets, ist->data_size);
1576 
1577  if (ist->decoding_needed) {
1578  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1579  ist->frames_decoded);
1580  if (type == AVMEDIA_TYPE_AUDIO)
1581  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1582  av_log(NULL, AV_LOG_VERBOSE, "; ");
1583  }
1584 
1585  av_log(NULL, AV_LOG_VERBOSE, "\n");
1586  }
1587 
1588  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1589  total_packets, total_size);
1590  }
1591 
1592  for (i = 0; i < nb_output_files; i++) {
1593  OutputFile *of = output_files[i];
1594  uint64_t total_packets = 0, total_size = 0;
1595 
1596  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1597  i, of->ctx->url);
1598 
1599  for (j = 0; j < of->ctx->nb_streams; j++) {
1601  enum AVMediaType type = ost->enc_ctx->codec_type;
1602 
1603  total_size += ost->data_size;
1604  total_packets += ost->packets_written;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1607  i, j, media_type_string(type));
1608  if (ost->encoding_needed) {
1609  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1610  ost->frames_encoded);
1611  if (type == AVMEDIA_TYPE_AUDIO)
1612  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1613  av_log(NULL, AV_LOG_VERBOSE, "; ");
1614  }
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1617  ost->packets_written, ost->data_size);
1618 
1619  av_log(NULL, AV_LOG_VERBOSE, "\n");
1620  }
1621 
1622  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1623  total_packets, total_size);
1624  }
     /* empty output: hint at -ss/-t/-frames unless this was a pass-1 run,
        where an empty output can be expected */
1625  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1626  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1627  if (pass1_used) {
1628  av_log(NULL, AV_LOG_WARNING, "\n");
1629  } else {
1630  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1631  }
1632  }
1633 }
1634 
/* Emit the periodic (at most every 500ms) or final progress report:
 * the familiar "frame= ... fps= ... size= ... time= ... bitrate= ..."
 * status line, plus a machine-readable key=value block written to
 * -progress (progress_avio) when enabled.
 * NOTE(review): several source lines are missing from this excerpt
 * (the embedded numbering is non-contiguous, e.g. 1676, 1744-1745,
 * 1792, 1821, 1825); comments only describe what is visible. */
1635 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1636 {
1637  AVBPrint buf, buf_script;
1638  OutputStream *ost;
1639  AVFormatContext *oc;
1640  int64_t total_size;
1641  AVCodecContext *enc;
1642  int frame_number, vid, i;
1643  double bitrate;
1644  double speed;
1645  int64_t pts = INT64_MIN + 1;
1646  static int64_t last_time = -1;
1647  static int qp_histogram[52];
1648  int hours, mins, secs, us;
1649  const char *hours_sign;
1650  int ret;
1651  float t;
1652 
1653  if (!print_stats && !is_last_report && !progress_avio)
1654  return;
1655 
     /* rate-limit intermediate reports to one per 500ms of wall clock */
1656  if (!is_last_report) {
1657  if (last_time == -1) {
1658  last_time = cur_time;
1659  return;
1660  }
1661  if ((cur_time - last_time) < 500000)
1662  return;
1663  last_time = cur_time;
1664  }
1665 
     /* elapsed wall-clock time in seconds since transcoding started */
1666  t = (cur_time-timer_start) / 1000000.0;
1667 
1668 
1669  oc = output_files[0]->ctx;
1670 
1671  total_size = avio_size(oc->pb);
1672  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1673  total_size = avio_tell(oc->pb);
1674 
1675  vid = 0;
1677  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1678  for (i = 0; i < nb_output_streams; i++) {
1679  float q = -1;
1680  ost = output_streams[i];
1681  enc = ost->enc_ctx;
1682  if (!ost->stream_copy)
1683  q = ost->quality / (float) FF_QP2LAMBDA;
1684 
     /* only the first video stream drives the frame/fps fields; further
        video streams just append their quantizer */
1685  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686  av_bprintf(&buf, "q=%2.1f ", q);
1687  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1688  ost->file_index, ost->index, q);
1689  }
1690  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1691  float fps;
1692 
1693  frame_number = ost->frame_number;
1694  fps = t > 1 ? frame_number / t : 0;
1695  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1696  frame_number, fps < 9.95, fps, q);
1697  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1698  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1699  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700  ost->file_index, ost->index, q);
1701  if (is_last_report)
1702  av_bprintf(&buf, "L");
1703  if (qp_hist) {
1704  int j;
1705  int qp = lrintf(q);
1706  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1707  qp_histogram[qp]++;
1708  for (j = 0; j < 32; j++)
1709  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1710  }
1711 
1712  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1713  int j;
1714  double error, error_sum = 0;
1715  double scale, scale_sum = 0;
1716  double p;
1717  char type[3] = { 'Y','U','V' };
1718  av_bprintf(&buf, "PSNR=");
1719  for (j = 0; j < 3; j++) {
1720  if (is_last_report) {
1721  error = enc->error[j];
1722  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1723  } else {
1724  error = ost->error[j];
1725  scale = enc->width * enc->height * 255.0 * 255.0;
1726  }
     /* chroma planes are subsampled: a quarter of the luma area */
1727  if (j)
1728  scale /= 4;
1729  error_sum += error;
1730  scale_sum += scale;
1731  p = psnr(error / scale);
1732  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1733  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1734  ost->file_index, ost->index, type[j] | 32, p);
1735  }
1736  p = psnr(error_sum / scale_sum);
1737  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1738  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1739  ost->file_index, ost->index, p);
1740  }
1741  vid = 1;
1742  }
1743  /* compute min output value */
1746  ost->st->time_base, AV_TIME_BASE_Q));
1747  if (is_last_report)
1748  nb_frames_drop += ost->last_dropped;
1749  }
1750 
     /* split pts into sign, h:m:s and microseconds for display */
1751  secs = FFABS(pts) / AV_TIME_BASE;
1752  us = FFABS(pts) % AV_TIME_BASE;
1753  mins = secs / 60;
1754  secs %= 60;
1755  hours = mins / 60;
1756  mins %= 60;
1757  hours_sign = (pts < 0) ? "-" : "";
1758 
1759  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1760  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1761 
1762  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1763  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1764  if (pts == AV_NOPTS_VALUE) {
1765  av_bprintf(&buf, "N/A ");
1766  } else {
1767  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1768  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1769  }
1770 
1771  if (bitrate < 0) {
1772  av_bprintf(&buf, "bitrate=N/A");
1773  av_bprintf(&buf_script, "bitrate=N/A\n");
1774  }else{
1775  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1776  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1777  }
1778 
1779  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1780  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1781  if (pts == AV_NOPTS_VALUE) {
1782  av_bprintf(&buf_script, "out_time_us=N/A\n");
1783  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1784  av_bprintf(&buf_script, "out_time=N/A\n");
1785  } else {
1786  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1787  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1788  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1789  hours_sign, hours, mins, secs, us);
1790  }
1791 
1793  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1794  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1795  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1796 
1797  if (speed < 0) {
1798  av_bprintf(&buf, " speed=N/A");
1799  av_bprintf(&buf_script, "speed=N/A\n");
1800  } else {
1801  av_bprintf(&buf, " speed=%4.3gx", speed);
1802  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1803  }
1804 
     /* intermediate reports end with '\r' so the line is overwritten */
1805  if (print_stats || is_last_report) {
1806  const char end = is_last_report ? '\n' : '\r';
1807  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1808  fprintf(stderr, "%s %c", buf.str, end);
1809  } else
1810  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1811 
1812  fflush(stderr);
1813  }
1814  av_bprint_finalize(&buf, NULL);
1815 
     /* write the key=value block to the -progress output, ending with a
        progress=continue/end marker */
1816  if (progress_avio) {
1817  av_bprintf(&buf_script, "progress=%s\n",
1818  is_last_report ? "end" : "continue");
1819  avio_write(progress_avio, buf_script.str,
1820  FFMIN(buf_script.len, buf_script.size - 1));
1822  av_bprint_finalize(&buf_script, NULL);
1823  if (is_last_report) {
1824  if ((ret = avio_closep(&progress_avio)) < 0)
1826  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1827  }
1828  }
1829 
1830  if (is_last_report)
1831  print_final_stats(total_size);
1832 }
1833 
/* Seed an InputFilter's format parameters straight from the stream's
 * codec parameters, for the case where no input frame was ever seen.
 * NOTE(review): the signature line is missing from this excerpt; the
 * body copies from `par` (codec-parameter-style fields) into
 * `ifilter` — confirm against the full source. */
1835 {
1836  // We never got any input. Set a fake format, which will
1837  // come from libavformat.
1838  ifilter->format = par->format;
1839  ifilter->sample_rate = par->sample_rate;
1840  ifilter->channels = par->channels;
1841  ifilter->channel_layout = par->channel_layout;
1842  ifilter->width = par->width;
1843  ifilter->height = par->height;
1844  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1845 }
1846 
/* Drain every encoder at end of input: initialize streams that never
 * received data, then send a NULL (flush) frame to each encoder and mux
 * all remaining packets until AVERROR_EOF.
 * NOTE(review): several source lines are missing from this excerpt
 * (the embedded numbering is non-contiguous, e.g. 1852, 1865, 1877,
 * 1886, 1897, 1920); comments only describe what is visible. */
1847 static void flush_encoders(void)
1848 {
1849  int i, ret;
1850 
1851  for (i = 0; i < nb_output_streams; i++) {
1853  AVCodecContext *enc = ost->enc_ctx;
1854  OutputFile *of = output_files[ost->file_index];
1855 
1856  if (!ost->encoding_needed)
1857  continue;
1858 
1859  // Try to enable encoding with no input frames.
1860  // Maybe we should just let encoding fail instead.
1861  if (!ost->initialized) {
1862  FilterGraph *fg = ost->filter->graph;
1863  char error[1024] = "";
1864 
1866  "Finishing stream %d:%d without any data written to it.\n",
1867  ost->file_index, ost->st->index);
1868 
     /* filtergraph never configured: fill input formats from the codec
        parameters so it can be configured now */
1869  if (ost->filter && !fg->graph) {
1870  int x;
1871  for (x = 0; x < fg->nb_inputs; x++) {
1872  InputFilter *ifilter = fg->inputs[x];
1873  if (ifilter->format < 0)
1874  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1875  }
1876 
1878  continue;
1879 
1880  ret = configure_filtergraph(fg);
1881  if (ret < 0) {
1882  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1883  exit_program(1);
1884  }
1885 
1887  }
1888 
1889  ret = init_output_stream(ost, error, sizeof(error));
1890  if (ret < 0) {
1891  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1892  ost->file_index, ost->index, error);
1893  exit_program(1);
1894  }
1895  }
1896 
1898  continue;
1899 
     /* drain loop: keep receiving packets until the encoder reports EOF */
1900  for (;;) {
1901  const char *desc = NULL;
1902  AVPacket pkt;
1903  int pkt_size;
1904 
1905  switch (enc->codec_type) {
1906  case AVMEDIA_TYPE_AUDIO:
1907  desc = "audio";
1908  break;
1909  case AVMEDIA_TYPE_VIDEO:
1910  desc = "video";
1911  break;
1912  default:
1913  av_assert0(0);
1914  }
1915 
1916  av_init_packet(&pkt);
1917  pkt.data = NULL;
1918  pkt.size = 0;
1919 
1921 
     /* a NULL frame puts the encoder into flush mode; EAGAIN here means
        it still wants the flush frame before producing output */
1922  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1923  ret = avcodec_send_frame(enc, NULL);
1924  if (ret < 0) {
1925  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1926  desc,
1927  av_err2str(ret));
1928  exit_program(1);
1929  }
1930  }
1931 
1932  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1933  if (ret < 0 && ret != AVERROR_EOF) {
1934  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1935  desc,
1936  av_err2str(ret));
1937  exit_program(1);
1938  }
1939  if (ost->logfile && enc->stats_out) {
1940  fprintf(ost->logfile, "%s", enc->stats_out);
1941  }
     /* encoder fully drained: emit the EOF packet to the muxer */
1942  if (ret == AVERROR_EOF) {
1943  output_packet(of, &pkt, ost, 1);
1944  break;
1945  }
1946  if (ost->finished & MUXER_FINISHED) {
1947  av_packet_unref(&pkt);
1948  continue;
1949  }
1950  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1951  pkt_size = pkt.size;
1952  output_packet(of, &pkt, ost, 0);
1953  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1954  do_video_stats(ost, pkt_size);
1955  }
1956  }
1957  }
1958 }
1959 
1960 /*
1961  * Check whether a packet from ist should be written into ost at this time
1962  */
/* NOTE(review): the signature line is missing from this excerpt; the
 * body uses `ist` and `ost` and returns 0 (reject) or 1 (accept) —
 * confirm against the full source. */
1964 {
1965  OutputFile *of = output_files[ost->file_index];
1966  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1967 
     /* only accept packets from the input stream this output is mapped to */
1968  if (ost->source_index != ist_index)
1969  return 0;
1970 
1971  if (ost->finished)
1972  return 0;
1973 
     /* drop packets that precede the requested output start time */
1974  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1975  return 0;
1976 
1977  return 1;
1978 }
1979 
/* Stream-copy path: rescale and forward an input packet (or, on
 * pkt==NULL, an EOF/flush packet) to the output muxer without
 * re-encoding, honouring start-time and recording-time limits.
 * NOTE(review): the signature line is missing from this excerpt, and
 * several interior lines are too (embedded numbering skips 2013, 2022,
 * 2040); the body uses `ist`, `ost` and `pkt` — confirm against the
 * full source. */
1981 {
1982  OutputFile *of = output_files[ost->file_index];
1983  InputFile *f = input_files [ist->file_index];
1984  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1985  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1986  AVPacket opkt;
1987 
1988  // EOF: flush output bitstream filters.
1989  if (!pkt) {
1990  av_init_packet(&opkt);
1991  opkt.data = NULL;
1992  opkt.size = 0;
1993  output_packet(of, &opkt, ost, 1);
1994  return;
1995  }
1996 
     /* by default, drop leading non-keyframes so the copy starts cleanly */
1997  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1998  !ost->copy_initial_nonkeyframes)
1999  return;
2000 
2001  if (!ost->frame_number && !ost->copy_prior_start) {
2002  int64_t comp_start = start_time;
2003  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2004  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2005  if (pkt->pts == AV_NOPTS_VALUE ?
2006  ist->pts < comp_start :
2007  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2008  return;
2009  }
2010 
     /* stop once the output's -t/-to recording window is exceeded */
2011  if (of->recording_time != INT64_MAX &&
2012  ist->pts >= of->recording_time + start_time) {
2014  return;
2015  }
2016 
2017  if (f->recording_time != INT64_MAX) {
2018  start_time = f->ctx->start_time;
2019  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2020  start_time += f->start_time;
2021  if (ist->pts >= f->recording_time + start_time) {
2023  return;
2024  }
2025  }
2026 
2027  /* force the input stream PTS */
2028  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2029  ost->sync_opts++;
2030 
2031  if (av_packet_ref(&opkt, pkt) < 0)
2032  exit_program(1);
2033 
     /* rescale timestamps from the input stream timebase to the muxer
        timebase, shifted by the output start time */
2034  if (pkt->pts != AV_NOPTS_VALUE)
2035  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 
2037  if (pkt->dts == AV_NOPTS_VALUE) {
2038  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2039  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2041  if(!duration)
2042  duration = ist->dec_ctx->frame_size;
2043  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2044  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2045  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2046  /* dts will be set immediately afterwards to what pts is now */
2047  opkt.pts = opkt.dts - ost_tb_start_time;
2048  } else
2049  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2050  opkt.dts -= ost_tb_start_time;
2051 
2052  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2053 
2054  output_packet(of, &opkt, ost, 0);
2055 }
2056 
/* If the decoder reported no channel layout, pick the default layout
 * for its channel count (only up to ist->guess_layout_max channels)
 * and log a warning.  Returns 1 when a layout is known or was guessed,
 * 0 when none could be determined.
 * NOTE(review): the signature line is missing from this excerpt; the
 * body uses `ist` (InputStream *) — confirm against the full source. */
2058 {
2059  AVCodecContext *dec = ist->dec_ctx;
2060 
2061  if (!dec->channel_layout) {
2062  char layout_name[256];
2063 
     /* refuse to guess for very high channel counts (-guess_layout_max) */
2064  if (dec->channels > ist->guess_layout_max)
2065  return 0;
2066  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2067  if (!dec->channel_layout)
2068  return 0;
2069  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2070  dec->channels, dec->channel_layout);
2071  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2072  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2073  }
2074  return 1;
2075 }
2076 
/* Record decode success/failure statistics and honour exit_on_error
 * (-xerror): abort on decode errors, and warn/abort on corrupt frames.
 * NOTE(review): embedded lines 2086-2087 (the corrupt-frame condition
 * and the head of the av_log() call) are missing from this excerpt. */
2077 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2078 {
     /* index 0 counts successful outputs, index 1 counts errors */
2079  if (*got_output || ret<0)
2080  decode_error_stat[ret<0] ++;
2081 
2082  if (ret < 0 && exit_on_error)
2083  exit_program(1);
2084 
2085  if (*got_output && ist) {
2088  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2089  if (exit_on_error)
2090  exit_program(1);
2091  }
2092  }
2093 }
2094 
2095 // Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): the signature line is missing from this excerpt; the
 * body iterates fg->inputs (FilterGraph *fg) and returns 1 only when
 * every audio/video input has a known (non-negative) format. */
2097 {
2098  int i;
2099  for (i = 0; i < fg->nb_inputs; i++) {
2100  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2101  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2102  return 0;
2103  }
2104  return 1;
2105 }
2106 
/* Feed a decoded frame into an input filter; (re)configure the
 * filtergraph when the frame's parameters changed, buffering frames in
 * a FIFO while other inputs' formats are still unknown.
 * NOTE(review): the signature line is missing from this excerpt, as are
 * several interior lines (embedded numbering skips 2135, 2144, 2147,
 * 2174); the body uses `ifilter` and `frame` — confirm against the
 * full source. */
2108 {
2109  FilterGraph *fg = ifilter->graph;
2110  int need_reinit, ret, i;
2111 
2112  /* determine if the parameters for this input changed */
2113  need_reinit = ifilter->format != frame->format;
2114 
2115  switch (ifilter->ist->st->codecpar->codec_type) {
2116  case AVMEDIA_TYPE_AUDIO:
2117  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2118  ifilter->channels != frame->channels ||
2119  ifilter->channel_layout != frame->channel_layout;
2120  break;
2121  case AVMEDIA_TYPE_VIDEO:
2122  need_reinit |= ifilter->width != frame->width ||
2123  ifilter->height != frame->height;
2124  break;
2125  }
2126 
2127  if (!ifilter->ist->reinit_filters && fg->graph)
2128  need_reinit = 0;
2129 
     /* a change of hardware frames context also forces reinit */
2130  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2131  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2132  need_reinit = 1;
2133 
2134  if (need_reinit) {
2136  if (ret < 0)
2137  return ret;
2138  }
2139 
2140  /* (re)init the graph if possible, otherwise buffer the frame and return */
2141  if (need_reinit || !fg->graph) {
2142  for (i = 0; i < fg->nb_inputs; i++) {
     /* not all input formats known yet: queue the frame in the FIFO,
        growing it when full */
2143  if (!ifilter_has_all_input_formats(fg)) {
2145  if (!tmp)
2146  return AVERROR(ENOMEM);
2148 
2149  if (!av_fifo_space(ifilter->frame_queue)) {
2150  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2151  if (ret < 0) {
2152  av_frame_free(&tmp);
2153  return ret;
2154  }
2155  }
2156  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2157  return 0;
2158  }
2159  }
2160 
     /* flush what the old graph still holds before reconfiguring */
2161  ret = reap_filters(1);
2162  if (ret < 0 && ret != AVERROR_EOF) {
2163  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2164  return ret;
2165  }
2166 
2167  ret = configure_filtergraph(fg);
2168  if (ret < 0) {
2169  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2170  return ret;
2171  }
2172  }
2173 
2175  if (ret < 0) {
2176  if (ret != AVERROR_EOF)
2177  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2178  return ret;
2179  }
2180 
2181  return 0;
2182 }
2183 
/* Signal EOF on an input filter at the given pts.  If the filtergraph
 * was never configured, fall back to the stream's codec parameters so
 * configuration can still happen; fail when the format stays unknown. */
2184 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2185 {
2186  int ret;
2187 
2188  ifilter->eof = 1;
2189 
2190  if (ifilter->filter) {
     /* NOTE(review): embedded line 2191 — the call that sets `ret`,
      * i.e. the actual EOF push into the configured graph — is missing
      * from this excerpt. */
2192  if (ret < 0)
2193  return ret;
2194  } else {
2195  // the filtergraph was never configured
2196  if (ifilter->format < 0)
2197  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2198  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2199  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2200  return AVERROR_INVALIDDATA;
2201  }
2202  }
2203 
2204  return 0;
2205 }
2206 
2207 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2208 // There is the following difference: if you got a frame, you must call
2209 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2210 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2211 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2212 {
2213  int ret;
2214 
2215  *got_frame = 0;
2216 
2217  if (pkt) {
2218  ret = avcodec_send_packet(avctx, pkt);
2219  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2220  // decoded frames with avcodec_receive_frame() until done.
2221  if (ret < 0 && ret != AVERROR_EOF)
2222  return ret;
2223  }
2224 
2225  ret = avcodec_receive_frame(avctx, frame);
2226  if (ret < 0 && ret != AVERROR(EAGAIN))
2227  return ret;
2228  if (ret >= 0)
2229  *got_frame = 1;
2230 
2231  return 0;
2232 }
2233 
// Fan a decoded frame out to every filtergraph input fed by this stream.
// The last filter receives the frame itself; earlier ones get a copy via
// ist->filter_frame. NOTE(review): the signature line (original 2234,
// send_frame_to_filters) is missing from this extraction.
2235 {
2236  int i, ret;
2237  AVFrame *f;
2238 
2239  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2240  for (i = 0; i < ist->nb_filters; i++) {
2241  if (i < ist->nb_filters - 1) {
2242  f = ist->filter_frame;
// NOTE(review): the line assigning `ret` (original 2243, presumably an
// av_frame_ref of decoded_frame into f) is missing here.
2244  if (ret < 0)
2245  break;
2246  } else
2247  f = decoded_frame;
2248  ret = ifilter_send_frame(ist->filters[i], f);
2249  if (ret == AVERROR_EOF)
2250  ret = 0; /* ignore */
2251  if (ret < 0) {
// NOTE(review): the av_log(...) opener line (original 2252) is missing.
2253  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2254  break;
2255  }
2256  }
2257  return ret;
2258 }
2259 
// Decode one audio packet, derive a pts for the decoded frame and hand it to
// the filter chain. Advances next_pts/next_dts by the decoded sample count so
// timestamps keep flowing even when the input has none.
// NOTE(review): the first signature line (original 2260) is missing here.
2261  int *decode_failed)
2262 {
2264  AVCodecContext *avctx = ist->dec_ctx;
2265  int ret, err = 0;
2266  AVRational decoded_frame_tb;
2267 
// Lazily allocate the per-stream scratch frames.
2268  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2269  return AVERROR(ENOMEM);
2270  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2271  return AVERROR(ENOMEM);
2273 
2275  ret = decode(avctx, decoded_frame, got_output, pkt);
2276  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2277  if (ret < 0)
2278  *decode_failed = 1;
2279 
2280  if (ret >= 0 && avctx->sample_rate <= 0) {
2281  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2283  }
2284 
2285  if (ret != AVERROR_EOF)
// NOTE(review): the statement body (original 2286) is missing here.
2287 
2288  if (!*got_output || ret < 0)
2289  return ret;
2290 
2292  ist->frames_decoded++;
2293 
2294  /* increment next_dts to use for the case where the input stream does not
2295  have timestamps or there are multiple frames in the packet */
2296  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2297  avctx->sample_rate;
2298  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2299  avctx->sample_rate;
2300 
// Pick a pts source in priority order: decoder output, packet pts,
// then fall back to the stream's running dts in AV_TIME_BASE units.
2301  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2302  decoded_frame_tb = ist->st->time_base;
2303  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2304  decoded_frame->pts = pkt->pts;
2305  decoded_frame_tb = ist->st->time_base;
2306  }else {
2307  decoded_frame->pts = ist->dts;
2308  decoded_frame_tb = AV_TIME_BASE_Q;
2309  }
// Rescale to the 1/sample_rate timebase, smoothing small gaps via
// filter_in_rescale_delta_last.
2311  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2312  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2313  (AVRational){1, avctx->sample_rate});
2316 
2319  return err < 0 ? err : ret;
2320 }
2321 
// Decode one video packet, reconstruct a best-effort timestamp for the frame
// (falling back to buffered packet dts at EOF) and send it to the filters.
// *duration_pts receives the frame's duration in stream timebase units.
2322 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2323  int *decode_failed)
2324 {
2326  int i, ret = 0, err = 0;
2327  int64_t best_effort_timestamp;
2328  int64_t dts = AV_NOPTS_VALUE;
2329  AVPacket avpkt;
2330 
2331  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2332  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2333  // skip the packet.
2334  if (!eof && pkt && pkt->size == 0)
2335  return 0;
2336 
2337  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2338  return AVERROR(ENOMEM);
2339  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2340  return AVERROR(ENOMEM);
2342  if (ist->dts != AV_NOPTS_VALUE)
2343  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2344  if (pkt) {
2345  avpkt = *pkt;
2346  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2347  }
2348 
2349  // The old code used to set dts on the drain packet, which does not work
2350  // with the new API anymore.
2351  if (eof) {
2352  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2353  if (!new)
2354  return AVERROR(ENOMEM);
2355  ist->dts_buffer = new;
2356  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2357  }
2358 
2360  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2361  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2362  if (ret < 0)
2363  *decode_failed = 1;
2364 
2365  // The following line may be required in some cases where there is no parser
2366  // or the parser does not has_b_frames correctly
2367  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2368  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2369  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2370  } else
// NOTE(review): the av_log(...) opener line (original 2371) is missing here.
2372  "video_delay is larger in decoder than demuxer %d > %d.\n"
2373  "If you want to help, upload a sample "
2374  "of this file to https://streams.videolan.org/upload/ "
2375  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2376  ist->dec_ctx->has_b_frames,
2377  ist->st->codecpar->video_delay);
2378  }
2379 
2380  if (ret != AVERROR_EOF)
// NOTE(review): the statement body (original 2381) is missing here.
2382 
2383  if (*got_output && ret >= 0) {
2384  if (ist->dec_ctx->width != decoded_frame->width ||
2385  ist->dec_ctx->height != decoded_frame->height ||
2386  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2387  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2391  ist->dec_ctx->width,
2392  ist->dec_ctx->height,
2393  ist->dec_ctx->pix_fmt);
2394  }
2395  }
2396 
2397  if (!*got_output || ret < 0)
2398  return ret;
2399 
2400  if(ist->top_field_first>=0)
2402 
2403  ist->frames_decoded++;
2404 
// Download the frame from hardware if a hwaccel retrieval callback is set.
2406  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2407  if (err < 0)
2408  goto fail;
2409  }
2411 
2412  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2413  *duration_pts = decoded_frame->pkt_duration;
2414 
// With a forced input framerate, synthesize CFR timestamps instead.
2415  if (ist->framerate.num)
2416  best_effort_timestamp = ist->cfr_next_pts++;
2417 
// At EOF, frames drained from the decoder carry no timestamp; pop the
// oldest buffered packet dts recorded above.
2418  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2419  best_effort_timestamp = ist->dts_buffer[0];
2420 
2421  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2422  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2423  ist->nb_dts_buffer--;
2424  }
2425 
2426  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2427  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2428 
2429  if (ts != AV_NOPTS_VALUE)
2430  ist->next_pts = ist->pts = ts;
2431  }
2432 
2433  if (debug_ts) {
2434  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2435  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2436  ist->st->index, av_ts2str(decoded_frame->pts),
2438  best_effort_timestamp,
2439  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2441  ist->st->time_base.num, ist->st->time_base.den);
2442  }
2443 
2444  if (ist->st->sample_aspect_ratio.num)
2446 
2448 
2449 fail:
2452  return err < 0 ? err : ret;
2453 }
2454 
// Decode one subtitle packet, optionally fix overlapping durations
// (-fix_sub_duration), feed sub2video when filtering, and send the subtitle
// to every subtitle-encoding output stream.
// NOTE(review): the first signature line (original 2455) is missing here.
2456  int *decode_failed)
2457 {
2459  int free_sub = 1;
2460  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2461  &subtitle, got_output, pkt);
2462 
2464 
2465  if (ret < 0 || !*got_output) {
2466  *decode_failed = 1;
// An empty packet means EOF: flush any pending sub2video state.
2467  if (!pkt->size)
2468  sub2video_flush(ist);
2469  return ret;
2470  }
2471 
// Clip the previous subtitle's display time so it ends when this one starts.
2472  if (ist->fix_sub_duration) {
2473  int end = 1;
2474  if (ist->prev_sub.got_output) {
2476  1000, AV_TIME_BASE);
2477  if (end < ist->prev_sub.subtitle.end_display_time) {
2478  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2479  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2481  end <= 0 ? ", dropping it" : "");
2483  }
2484  }
// Output the *previous* subtitle this call; stash the current one.
2485  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2486  FFSWAP(int, ret, ist->prev_sub.ret);
2488  if (end <= 0)
2489  goto out;
2490  }
2491 
2492  if (!*got_output)
2493  return ret;
2494 
2495  if (ist->sub2video.frame) {
2496  sub2video_update(ist, INT64_MIN, &subtitle);
2497  } else if (ist->nb_filters) {
// Queue the subtitle until the filtergraph is ready to consume it.
2498  if (!ist->sub2video.sub_queue)
2499  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2500  if (!ist->sub2video.sub_queue)
2501  exit_program(1);
2502  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2504  if (ret < 0)
2505  exit_program(1);
2506  }
2508  free_sub = 0;
2509  }
2510 
2511  if (!subtitle.num_rects)
2512  goto out;
2513 
2514  ist->frames_decoded++;
2515 
2516  for (i = 0; i < nb_output_streams; i++) {
2518 
2519  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2520  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2521  continue;
2522 
2523  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2524  }
2525 
2526 out:
2527  if (free_sub)
2529  return ret;
2530 }
2531 
// Propagate EOF to every filtergraph input fed by this stream, using the
// stream's last pts (rescaled to stream timebase) as the EOF timestamp.
// NOTE(review): the signature line (original 2532, send_filter_eof) is
// missing from this extraction.
2533 {
2534  int i, ret;
2535  /* TODO keep pts also in stream time base to avoid converting back */
2536  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2538 
2539  for (i = 0; i < ist->nb_filters; i++) {
2540  ret = ifilter_send_eof(ist->filters[i], pts);
2541  if (ret < 0)
2542  return ret;
2543  }
2544  return 0;
2545 }
2546 
2547 /* pkt = NULL means EOF (needed to flush decoder buffers) */
// Core per-packet driver: decodes (audio/video/subtitle) when decoding is
// needed, maintains the stream's pts/dts bookkeeping, sends filter EOF after
// flushing, and performs stream-copy timestamp advancement otherwise.
// Returns 0 once EOF has been fully processed, non-zero while more input
// can be consumed.
2548 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2549 {
2550  int ret = 0, i;
2551  int repeating = 0;
2552  int eof_reached = 0;
2553 
2554  AVPacket avpkt;
// First packet: seed dts/pts; the negative offset compensates for decoder
// delay (B-frames) when an average frame rate is known.
2555  if (!ist->saw_first_ts) {
2556  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2557  ist->pts = 0;
2558  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2559  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2560  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2561  }
2562  ist->saw_first_ts = 1;
2563  }
2564 
2565  if (ist->next_dts == AV_NOPTS_VALUE)
2566  ist->next_dts = ist->dts;
2567  if (ist->next_pts == AV_NOPTS_VALUE)
2568  ist->next_pts = ist->pts;
2569 
2570  if (!pkt) {
2571  /* EOF handling */
2572  av_init_packet(&avpkt);
2573  avpkt.data = NULL;
2574  avpkt.size = 0;
2575  } else {
2576  avpkt = *pkt;
2577  }
2578 
2579  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2580  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2581  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2582  ist->next_pts = ist->pts = ist->dts;
2583  }
2584 
2585  // while we have more to decode or while the decoder did output something on EOF
2586  while (ist->decoding_needed) {
2587  int64_t duration_dts = 0;
2588  int64_t duration_pts = 0;
2589  int got_output = 0;
2590  int decode_failed = 0;
2591 
2592  ist->pts = ist->next_pts;
2593  ist->dts = ist->next_dts;
2594 
2595  switch (ist->dec_ctx->codec_type) {
2596  case AVMEDIA_TYPE_AUDIO:
// repeating==1 means we are draining more output from the same packet.
2597  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2598  &decode_failed);
2599  break;
2600  case AVMEDIA_TYPE_VIDEO:
2601  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2602  &decode_failed);
2603  if (!repeating || !pkt || got_output) {
2604  if (pkt && pkt->duration) {
2605  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2606  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
// NOTE(review): the line declaring `ticks` (original 2607) is missing here.
2608  duration_dts = ((int64_t)AV_TIME_BASE *
2609  ist->dec_ctx->framerate.den * ticks) /
2611  }
2612 
2613  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2614  ist->next_dts += duration_dts;
2615  }else
2616  ist->next_dts = AV_NOPTS_VALUE;
2617  }
2618 
// Prefer the decoded frame's own duration for pts advancement.
2619  if (got_output) {
2620  if (duration_pts > 0) {
2621  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2622  } else {
2623  ist->next_pts += duration_dts;
2624  }
2625  }
2626  break;
2627  case AVMEDIA_TYPE_SUBTITLE:
2628  if (repeating)
2629  break;
2630  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2631  if (!pkt && ret >= 0)
2632  ret = AVERROR_EOF;
2633  break;
2634  default:
2635  return -1;
2636  }
2637 
2638  if (ret == AVERROR_EOF) {
2639  eof_reached = 1;
2640  break;
2641  }
2642 
2643  if (ret < 0) {
2644  if (decode_failed) {
2645  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2646  ist->file_index, ist->st->index, av_err2str(ret));
2647  } else {
2648  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2649  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2650  }
2651  if (!decode_failed || exit_on_error)
2652  exit_program(1);
2653  break;
2654  }
2655 
2656  if (got_output)
2657  ist->got_output = 1;
2658 
2659  if (!got_output)
2660  break;
2661 
2662  // During draining, we might get multiple output frames in this loop.
2663  // ffmpeg.c does not drain the filter chain on configuration changes,
2664  // which means if we send multiple frames at once to the filters, and
2665  // one of those frames changes configuration, the buffered frames will
2666  // be lost. This can upset certain FATE tests.
2667  // Decode only 1 frame per call on EOF to appease these FATE tests.
2668  // The ideal solution would be to rewrite decoding to use the new
2669  // decoding API in a better way.
2670  if (!pkt)
2671  break;
2672 
2673  repeating = 1;
2674  }
2675 
2676  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2677  /* except when looping we need to flush but not to send an EOF */
2678  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2679  int ret = send_filter_eof(ist);
2680  if (ret < 0) {
2681  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2682  exit_program(1);
2683  }
2684  }
2685 
2686  /* handle stream copy */
// Without decoding, advance next_dts/next_pts manually from packet/codec
// parameters so copied streams still carry sane timestamps.
2687  if (!ist->decoding_needed && pkt) {
2688  ist->dts = ist->next_dts;
2689  switch (ist->dec_ctx->codec_type) {
2690  case AVMEDIA_TYPE_AUDIO:
2691  av_assert1(pkt->duration >= 0);
2692  if (ist->dec_ctx->sample_rate) {
2693  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2694  ist->dec_ctx->sample_rate;
2695  } else {
2697  }
2698  break;
2699  case AVMEDIA_TYPE_VIDEO:
2700  if (ist->framerate.num) {
2701  // TODO: Remove work-around for c99-to-c89 issue 7
2702  AVRational time_base_q = AV_TIME_BASE_Q;
2703  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2704  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2705  } else if (pkt->duration) {
2707  } else if(ist->dec_ctx->framerate.num != 0) {
2708  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2709  ist->next_dts += ((int64_t)AV_TIME_BASE *
2710  ist->dec_ctx->framerate.den * ticks) /
2712  }
2713  break;
2714  }
2715  ist->pts = ist->dts;
2716  ist->next_pts = ist->next_dts;
2717  }
// Forward the packet to every stream-copy output fed by this input.
2718  for (i = 0; i < nb_output_streams; i++) {
2720 
2721  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2722  continue;
2723 
2724  do_streamcopy(ist, ost, pkt);
2725  }
2726 
2727  return !eof_reached;
2728 }
2729 
// Generate an SDP description covering all RTP output muxers and either print
// it to stdout or write it to -sdp_file. Waits until every output header has
// been written so codec parameters are final.
2730 static void print_sdp(void)
2731 {
2732  char sdp[16384];
2733  int i;
2734  int j;
2735  AVIOContext *sdp_pb;
2736  AVFormatContext **avc;
2737 
2738  for (i = 0; i < nb_output_files; i++) {
2739  if (!output_files[i]->header_written)
2740  return;
2741  }
2742 
2743  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2744  if (!avc)
2745  exit_program(1);
// Collect only the RTP muxer contexts; j counts how many were found.
2746  for (i = 0, j = 0; i < nb_output_files; i++) {
2747  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2748  avc[j] = output_files[i]->ctx;
2749  j++;
2750  }
2751  }
2752 
2753  if (!j)
2754  goto fail;
2755 
2756  av_sdp_create(avc, j, sdp, sizeof(sdp));
2757 
2758  if (!sdp_filename) {
2759  printf("SDP:\n%s\n", sdp);
2760  fflush(stdout);
2761  } else {
2762  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2763  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2764  } else {
2765  avio_print(sdp_pb, sdp);
2766  avio_closep(&sdp_pb);
2768  }
2769  }
2770 
2771 fail:
2772  av_freep(&avc);
2773 }
2774 
// AVCodecContext.get_format callback: walk the decoder's offered pixel format
// list and pick the first one we can set up a hwaccel for, falling back to
// the first software format. NOTE(review): the signature line (original 2775,
// get_format) is missing from this extraction.
2776 {
2777  InputStream *ist = s->opaque;
2778  const enum AVPixelFormat *p;
2779  int ret;
2780 
2781  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2783  const AVCodecHWConfig *config = NULL;
2784  int i;
2785 
// First non-hwaccel format ends the search: software decoding wins.
2786  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2787  break;
2788 
2789  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2790  ist->hwaccel_id == HWACCEL_AUTO) {
// Look for a decoder-advertised hw config matching this pixel format.
2791  for (i = 0;; i++) {
2792  config = avcodec_get_hw_config(s->codec, i);
2793  if (!config)
2794  break;
2795  if (!(config->methods &
2797  continue;
2798  if (config->pix_fmt == *p)
2799  break;
2800  }
2801  }
2802  if (config) {
2803  if (config->device_type != ist->hwaccel_device_type) {
2804  // Different hwaccel offered, ignore.
2805  continue;
2806  }
2807 
// NOTE(review): the call that sets `ret` (original 2808, presumably hwaccel
// device initialization) is missing here.
2809  if (ret < 0) {
2810  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2812  "%s hwaccel requested for input stream #%d:%d, "
2813  "but cannot be initialized.\n",
2815  ist->file_index, ist->st->index);
2816  return AV_PIX_FMT_NONE;
2817  }
2818  continue;
2819  }
2820  } else {
// Legacy path: match against the static hwaccels[] table.
2821  const HWAccel *hwaccel = NULL;
2822  int i;
2823  for (i = 0; hwaccels[i].name; i++) {
2824  if (hwaccels[i].pix_fmt == *p) {
2825  hwaccel = &hwaccels[i];
2826  break;
2827  }
2828  }
2829  if (!hwaccel) {
2830  // No hwaccel supporting this pixfmt.
2831  continue;
2832  }
2833  if (hwaccel->id != ist->hwaccel_id) {
2834  // Does not match requested hwaccel.
2835  continue;
2836  }
2837 
2838  ret = hwaccel->init(s);
2839  if (ret < 0) {
2841  "%s hwaccel requested for input stream #%d:%d, "
2842  "but cannot be initialized.\n", hwaccel->name,
2843  ist->file_index, ist->st->index);
2844  return AV_PIX_FMT_NONE;
2845  }
2846  }
2847 
2848  if (ist->hw_frames_ctx) {
2849  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2850  if (!s->hw_frames_ctx)
2851  return AV_PIX_FMT_NONE;
2852  }
2853 
2854  ist->hwaccel_pix_fmt = *p;
2855  break;
2856  }
2857 
2858  return *p;
2859 }
2860 
// AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
// allocator for hw frames. NOTE(review): the signature line (original 2861,
// get_buffer) and the default-path return (original 2868) are missing from
// this extraction.
2862 {
2863  InputStream *ist = s->opaque;
2864 
2865  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2866  return ist->hwaccel_get_buffer(s, frame, flags);
2867 
2869 }
2870 
// Open the decoder for one input stream (when decoding is needed), wiring up
// the get_format/get_buffer callbacks and per-codec decoder options.
// On failure, a human-readable message is written into `error`.
2871 static int init_input_stream(int ist_index, char *error, int error_len)
2872 {
2873  int ret;
2874  InputStream *ist = input_streams[ist_index];
2875 
2876  if (ist->decoding_needed) {
2877  AVCodec *codec = ist->dec;
2878  if (!codec) {
2879  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2880  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2881  return AVERROR(EINVAL);
2882  }
2883 
2884  ist->dec_ctx->opaque = ist;
2885  ist->dec_ctx->get_format = get_format;
2886  ist->dec_ctx->get_buffer2 = get_buffer;
2887  ist->dec_ctx->thread_safe_callbacks = 1;
2888 
2889  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2890  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2891  (ist->decoding_needed & DECODING_FOR_OST)) {
2892  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2894  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2895  }
2896 
2897  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2898 
2899  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2900  * audio, and video decoders such as cuvid or mediacodec */
2901  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2902 
2903  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2904  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2905  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2907  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2908 
// NOTE(review): the call that sets `ret` (original 2909, presumably hw device
// setup for the decoder) is missing here.
2910  if (ret < 0) {
2911  snprintf(error, error_len, "Device setup failed for "
2912  "decoder on input stream #%d:%d : %s",
2913  ist->file_index, ist->st->index, av_err2str(ret));
2914  return ret;
2915  }
2916 
2917  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2918  if (ret == AVERROR_EXPERIMENTAL)
2919  abort_codec_experimental(codec, 0);
2920 
2921  snprintf(error, error_len,
2922  "Error while opening decoder for input stream "
2923  "#%d:%d : %s",
2924  ist->file_index, ist->st->index, av_err2str(ret));
2925  return ret;
2926  }
2928  }
2929 
2930  ist->next_pts = AV_NOPTS_VALUE;
2931  ist->next_dts = AV_NOPTS_VALUE;
2932 
2933  return 0;
2934 }
2935 
// Return the InputStream feeding this output stream, or NULL for outputs
// with no direct source (e.g. pure filter/lavfi sources).
// NOTE(review): the signature line (original 2936, get_input_stream) is
// missing from this extraction.
2937 {
2938  if (ost->source_index >= 0)
2939  return input_streams[ost->source_index];
2940  return NULL;
2941 }
2942 
/* qsort comparator for int64_t values: negative, zero or positive depending
 * on the ordering of *a and *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    return lhs > rhs ? 1 : 0;
}
2947 
2948 /* open the muxer when all the streams are initialized */
// Once every output stream of this file is initialized, write the container
// header, optionally emit the SDP, and flush packets that were queued while
// waiting for initialization.
// NOTE(review): the signature line (original 2949, check_init_output_file)
// is missing from this extraction.
2950 {
2951  int ret, i;
2952 
// Bail out (successfully) while any stream is still uninitialized.
2953  for (i = 0; i < of->ctx->nb_streams; i++) {
2955  if (!ost->initialized)
2956  return 0;
2957  }
2958 
2959  of->ctx->interrupt_callback = int_cb;
2960 
2961  ret = avformat_write_header(of->ctx, &of->opts);
2962  if (ret < 0) {
2964  "Could not write header for output file #%d "
2965  "(incorrect codec parameters ?): %s\n",
2967  return ret;
2968  }
2969  //assert_avoptions(of->opts);
2970  of->header_written = 1;
2971 
2972  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2973 
2974  if (sdp_filename || want_sdp)
2975  print_sdp();
2976 
2977  /* flush the muxing queues */
2978  for (i = 0; i < of->ctx->nb_streams; i++) {
2980 
2981  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2982  if (!av_fifo_size(ost->muxing_queue))
2983  ost->mux_timebase = ost->st->time_base;
2984 
2985  while (av_fifo_size(ost->muxing_queue)) {
2986  AVPacket pkt;
2987  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2988  write_packet(of, &pkt, ost, 1);
2989  }
2990  }
2991 
2992  return 0;
2993 }
2994 
// Initialize the output stream's bitstream filter chain (if any): feed it the
// current stream codec parameters, then copy the filtered parameters and
// timebase back onto the stream. NOTE(review): the signature line (original
// 2995, init_output_bsfs) is missing from this extraction.
2996 {
2997  AVBSFContext *ctx = ost->bsf_ctx;
2998  int ret;
2999 
3000  if (!ctx)
3001  return 0;
3002 
3003  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3004  if (ret < 0)
3005  return ret;
3006 
3007  ctx->time_base_in = ost->st->time_base;
3008 
3009  ret = av_bsf_init(ctx);
3010  if (ret < 0) {
3011  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3012  ctx->filter->name);
3013  return ret;
3014  }
3015 
// The bsf may rewrite codec parameters (e.g. extradata) and the timebase.
3016  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3017  if (ret < 0)
3018  return ret;
3019  ost->st->time_base = ctx->time_base_out;
3020 
3021  return 0;
3022 }
3023 
// Set up an output stream in stream-copy mode: derive codec parameters from
// the input stream (filtered through any -c:x copy options), pick a codec
// tag valid for the output container, and carry over timing, disposition and
// side data. NOTE(review): the signature line (original 3024,
// init_output_stream_streamcopy) is missing from this extraction.
3025 {
3026  OutputFile *of = output_files[ost->file_index];
3028  AVCodecParameters *par_dst = ost->st->codecpar;
3029  AVCodecParameters *par_src = ost->ref_par;
3030  AVRational sar;
3031  int i, ret;
3032  uint32_t codec_tag = par_dst->codec_tag;
3033 
3034  av_assert0(ist && !ost->filter);
3035 
3036  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3037  if (ret >= 0)
3038  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3039  if (ret < 0) {
3041  "Error setting up codec context options.\n");
3042  return ret;
3043  }
3044 
3045  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3046  if (ret < 0) {
3048  "Error getting reference codec parameters.\n");
3049  return ret;
3050  }
3051 
// Keep the source codec tag only if the output container can represent it.
3052  if (!codec_tag) {
3053  unsigned int codec_tag_tmp;
3054  if (!of->ctx->oformat->codec_tag ||
3055  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3056  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3057  codec_tag = par_src->codec_tag;
3058  }
3059 
3060  ret = avcodec_parameters_copy(par_dst, par_src);
3061  if (ret < 0)
3062  return ret;
3063 
3064  par_dst->codec_tag = codec_tag;
3065 
3066  if (!ost->frame_rate.num)
3067  ost->frame_rate = ist->framerate;
3068  ost->st->avg_frame_rate = ost->frame_rate;
3069 
// NOTE(review): the call that sets `ret` (original 3070, presumably
// avformat_transfer_internal_stream_timing_info) is missing here.
3071  if (ret < 0)
3072  return ret;
3073 
3074  // copy timebase while removing common factors
3075  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3077 
3078  // copy estimated duration as a hint to the muxer
3079  if (ost->st->duration <= 0 && ist->st->duration > 0)
3080  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3081 
3082  // copy disposition
3083  ost->st->disposition = ist->st->disposition;
3084 
3085  if (ist->st->nb_side_data) {
3086  for (i = 0; i < ist->st->nb_side_data; i++) {
3087  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3088  uint8_t *dst_data;
3089 
3090  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3091  if (!dst_data)
3092  return AVERROR(ENOMEM);
3093  memcpy(dst_data, sd_src->data, sd_src->size);
3094  }
3095  }
3096 
// A user-specified rotation overrides any display matrix copied above.
3097  if (ost->rotate_overridden) {
3099  sizeof(int32_t) * 9);
3100  if (sd)
3101  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3102  }
3103 
3104  switch (par_dst->codec_type) {
3105  case AVMEDIA_TYPE_AUDIO:
3106  if (audio_volume != 256) {
3107  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3108  exit_program(1);
3109  }
3110  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3111  par_dst->block_align= 0;
3112  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3113  par_dst->block_align= 0;
3114  break;
3115  case AVMEDIA_TYPE_VIDEO:
3116  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3117  sar =
3118  av_mul_q(ost->frame_aspect_ratio,
3119  (AVRational){ par_dst->height, par_dst->width });
3120  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3121  "with stream copy may produce invalid files\n");
3122  }
3123  else if (ist->st->sample_aspect_ratio.num)
3124  sar = ist->st->sample_aspect_ratio;
3125  else
3126  sar = par_src->sample_aspect_ratio;
3127  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3128  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3129  ost->st->r_frame_rate = ist->st->r_frame_rate;
3130  break;
3131  }
3132 
3133  ost->mux_timebase = ist->st->time_base;
3134 
3135  return 0;
3136 }
3137 
// Fill the stream's "encoder" metadata tag with the libavcodec ident and the
// encoder name, unless the user already set one or bitexact mode suppresses
// version info. NOTE(review): the signature line (original 3138,
// set_encoder_id) is missing from this extraction.
3139 {
3140  AVDictionaryEntry *e;
3141 
3142  uint8_t *encoder_string;
3143  int encoder_string_len;
3144  int format_flags = 0;
3145  int codec_flags = ost->enc_ctx->flags;
3146 
3147  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3148  return;
3149 
// Evaluate -fflags / -flags option strings to detect bitexact mode.
3150  e = av_dict_get(of->opts, "fflags", NULL, 0);
3151  if (e) {
3152  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3153  if (!o)
3154  return;
3155  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3156  }
3157  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3158  if (e) {
3159  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3160  if (!o)
3161  return;
3162  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3163  }
3164 
3165  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3166  encoder_string = av_mallocz(encoder_string_len);
3167  if (!encoder_string)
3168  exit_program(1);
3169 
3170  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3171  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3172  else
3173  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3174  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3175  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3177 }
3178 
// Parse the -force_key_frames specification (comma-separated timestamps,
// with a "chapters[+offset]" shorthand) into a sorted array of keyframe pts
// in the encoder timebase, stored on the output stream.
// NOTE(review): the first signature line (original 3179,
// parse_forced_key_frames(char *kf, OutputStream *ost, ...) is missing here.
3180  AVCodecContext *avctx)
3181 {
3182  char *p;
3183  int n = 1, i, size, index = 0;
3184  int64_t t, *pts;
3185 
// Count entries: one per comma, plus one.
3186  for (p = kf; *p; p++)
3187  if (*p == ',')
3188  n++;
3189  size = n;
3190  pts = av_malloc_array(size, sizeof(*pts));
3191  if (!pts) {
3192  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3193  exit_program(1);
3194  }
3195 
3196  p = kf;
3197  for (i = 0; i < n; i++) {
3198  char *next = strchr(p, ',');
3199 
3200  if (next)
3201  *next++ = 0;
3202 
// "chapters" expands to one keyframe per chapter start (plus optional offset).
3203  if (!memcmp(p, "chapters", 8)) {
3204 
3205  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3206  int j;
3207 
3208  if (avf->nb_chapters > INT_MAX - size ||
3209  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3210  sizeof(*pts)))) {
3212  "Could not allocate forced key frames array.\n");
3213  exit_program(1);
3214  }
3215  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3216  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3217 
3218  for (j = 0; j < avf->nb_chapters; j++) {
3219  AVChapter *c = avf->chapters[j];
3220  av_assert1(index < size);
3221  pts[index++] = av_rescale_q(c->start, c->time_base,
3222  avctx->time_base) + t;
3223  }
3224 
3225  } else {
3226 
3227  t = parse_time_or_die("force_key_frames", p, 1);
3228  av_assert1(index < size);
3229  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3230 
3231  }
3232 
3233  p = next;
3234  }
3235 
3236  av_assert0(index == size);
3237  qsort(pts, size, sizeof(*pts), compare_int64);
3238  ost->forced_kf_count = size;
3239  ost->forced_kf_pts = pts;
3240 }
3241 
// Choose the encoder timebase: an explicit -enc_time_base value wins; a
// negative value means "copy from the input stream" when one exists;
// otherwise fall back to the caller-supplied default.
3242 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3243 {
// NOTE(review): the line declaring `ist` (original 3244, presumably
// InputStream *ist = get_input_stream(ost)) is missing from this extraction.
3245  AVCodecContext *enc_ctx = ost->enc_ctx;
3246  AVFormatContext *oc;
3247 
3248  if (ost->enc_timebase.num > 0) {
3249  enc_ctx->time_base = ost->enc_timebase;
3250  return;
3251  }
3252 
3253  if (ost->enc_timebase.num < 0) {
3254  if (ist) {
3255  enc_ctx->time_base = ist->st->time_base;
3256  return;
3257  }
3258 
3259  oc = output_files[ost->file_index]->ctx;
3260  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3261  }
3262 
3263  enc_ctx->time_base = default_time_base;
3264 }
3265 
/* Configure the encoder context of one output stream for the encoding path:
 * frame rate, time base, dimensions / sample parameters from the buffersink,
 * and forced-keyframe handling.
 * NOTE(review): the function signature and several wrapped call-argument
 * lines appear to be missing from this listing — confirm against the full
 * source before relying on the exact call sites. */
{
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* Inherit the disposition of the source stream. */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

    } else {
        /* No source stream: check whether this is the only stream of its
         * media type in the output file. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Pick the output frame rate: filtergraph, then -r / input framerate,
         * then the input stream's real frame rate, finally a 25fps fallback. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            "No information "
            "about the input framerate is available. Falling "
            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
            "if you want a different framerate.\n",
            ost->file_index, ost->index);
        }

        /* Snap to the nearest frame rate the encoder supports, unless the
         * user forced the exact rate with -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Audio parameters come from the buffersink feeding the encoder. */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        /* If nothing set a time base yet, take the buffersink's. */
        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* Geometry or pixel format changed relative to the decoded input. */
        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
        }

        /* -top 0/1 forces bottom-field-first / top-field-first. */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* "expr:" keyframes are evaluated per frame at encode time. */
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                if (ret < 0) {
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Bitmap subtitles fall back to the source video dimensions. */
        if (!enc_ctx->width) {
            enc_ctx->width  = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    /* The muxing time base starts out equal to the encoder time base. */
    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3419 
/**
 * Fully initialize one output stream: open the encoder (or set up
 * streamcopy), propagate side data and subtitle headers, apply the
 * user-specified disposition, init bitstream filters and, when all streams
 * of the file are ready, write the output file header.
 *
 * @param ost       output stream to initialize
 * @param error     buffer receiving a human-readable error message
 * @param error_len size of 'error'
 * @return 0 on success, a negative AVERROR code on failure
 *
 * NOTE(review): a few lines (calls whose results feed the 'ret < 0' checks
 * below) are missing from this listing — confirm against the full source.
 */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate of 128k for encoders without own defaults. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* Subtitle transcoding is only supported text->text or bitmap->bitmap. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* Fixed-frame-size audio encoders dictate the buffersink frame size. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        if (ret < 0) {
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* Copy encoder-produced global side data onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* Autorotation is handled in lavfi, so neutralize the
                     * copied display matrix. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* Write the file header as soon as all its streams are initialized. */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3609 
/* Warn (once per stream index) when a new stream appears mid-demuxing;
 * such streams are ignored because mappings are fixed at startup.
 * NOTE(review): the media-type string argument matching the "%s" specifier
 * is missing from this listing — confirm against the full source. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* Already warned about this (or a later) stream index. */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3624 
/**
 * One-time setup before the main transcode loop: attribute source streams
 * to complex-filtergraph outputs, init all input streams and non-filtered
 * encoders, discard unused programs, write headers of stream-less files and
 * dump the stream mapping.
 *
 * @return 0 on success, a negative AVERROR code on failure
 *
 * NOTE(review): several lines (the 'ifile' declarations in the loops and the
 * calls feeding some 'ret' checks) are missing from this listing — confirm
 * against the full source.
 */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* Attribute a source stream to the outputs of single-input
     * filtergraphs, for mapping/reporting purposes. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* On failure, close any encoders opened so far before bailing. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* Keep the program if any of its streams is in use. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        /* Inputs feeding complex filtergraphs. */
        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* Report "native" when the decoder carries the codec's own
                 * reference name. */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    return 0;
}
3796 
/* Return 1 if there remain streams where more output is wanted, 0 otherwise.
 * NOTE(review): the 'ost' declaration inside the loop is missing from this
 * listing — confirm against the full source. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputFile *of       = output_files[ost->file_index];
        AVFormatContext *os  = output_files[ost->file_index]->ctx;

        /* Skip finished streams and files that hit the -fs size limit. */
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        /* Stream reached -frames limit: nothing more wanted from it. */
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
            continue;
        }

        return 1;
    }

    return 0;
}
3822 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
/* NOTE(review): the function signature line is missing from this listing,
 * as is the 'ost' declaration in the loop — confirm against the full
 * source. Selection policy: uninitialized streams first, then the
 * available stream with the smallest muxed DTS. */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        /* Rescale the stream's last muxed DTS to the common AV_TIME_BASE;
         * streams without a DTS sort first (INT64_MIN). */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
                   "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                   ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* Uninitialized streams get priority so they can produce their
         * first frame and finish initialization. */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            /* An unavailable minimum means "nothing to do this round". */
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3854 
/**
 * Turn terminal echo on stdin on (non-zero) or off (zero).
 * Does nothing on platforms without <termios.h> or when stdin is not a tty.
 */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3866 
/**
 * Poll for and act on an interactive keyboard command
 * (q quit, +/- verbosity, s QP histogram, h hex dump, c/C filter commands,
 * d/D debug flags, ? help). Polls at most once per 100ms.
 *
 * @param cur_time current wall-clock time in microseconds
 * @return AVERROR_EXIT when the user asked to quit, 0 otherwise
 *
 * NOTE(review): a few lines (an fprintf in the 'h' branch, an error av_log
 * in the command parser and the 'ost' declaration in the debug loop) are
 * missing from this listing — confirm against the full source.
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h'){
        /* Cycle: off -> packet dump -> packet+hex dump -> off. */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
    }
    if (key == 'c' || key == 'C'){
        /* Read a filter command of the form
         * "<target>|all <time>|-1 <command>[ <argument>]" from the tty. */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    /* time < 0: send immediately; otherwise queue ('C' only). */
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': cycle to the next supported debug flag. */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                           |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                           )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read a numeric debug value from the tty. */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* Apply the new debug value to every decoder and encoder. */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
3983 
3984 #if HAVE_THREADS
/**
 * Per-input-file demuxer thread: reads packets from the demuxer and pushes
 * them into the file's thread message queue until EOF or error.
 *
 * @param arg the InputFile to demux
 * @return always NULL
 */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* Start out non-blocking if requested; may permanently fall back to
     * blocking sends below once the queue fills up. */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        /* Demuxer has no data right now: back off briefly and retry. */
        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* Propagate EOF/error to the consumer side of the queue. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Queue full in non-blocking mode: switch to blocking sends for
             * the rest of the stream and warn about the queue size. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            /* The packet was not handed over; release it here. */
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4025 
/* Drain and tear down the demuxer thread of input file i; no-op when the
 * file has no reader thread.
 * NOTE(review): one line is missing from this listing before the drain loop
 * — presumably signalling EOF to the sending side of the queue so the
 * thread can exit; confirm against the full source. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* Unref all packets still sitting in the queue, then reap the thread. */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4041 
4042 static void free_input_threads(void)
4043 {
4044  int i;
4045 
4046  for (i = 0; i < nb_input_files; i++)
4047  free_input_thread(i);
4048 }
4049 
4050 static int init_input_thread(int i)
4051 {
4052  int ret;
4053  InputFile *f = input_files[i];
4054 
4055  if (nb_input_files == 1)
4056  return 0;
4057 
4058  if (f->ctx->pb ? !f->ctx->pb->seekable :
4059  strcmp(f->ctx->iformat->name, "lavfi"))
4060  f->non_blocking = 1;
4061  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4062  f->thread_queue_size, sizeof(AVPacket));
4063  if (ret < 0)
4064  return ret;
4065 
4066  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4067  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4068  av_thread_message_queue_free(&f->in_thread_queue);
4069  return AVERROR(ret);
4070  }
4071 
4072  return 0;
4073 }
4074 
4075 static int init_input_threads(void)
4076 {
4077  int i, ret;
4078 
4079  for (i = 0; i < nb_input_files; i++) {
4080  ret = init_input_thread(i);
4081  if (ret < 0)
4082  return ret;
4083  }
4084  return 0;
4085 }
4086 
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file was flagged as live.
 * NOTE(review): the second half of the ternary (the blocking-mode flag
 * argument) is missing from this listing — confirm against the full
 * source. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
}
4093 #endif
4094 
/* Fetch the next demuxed packet for an input file, honoring -re rate
 * emulation; uses the reader-thread queue when several inputs exist.
 * NOTE(review): the function signature line is missing from this listing —
 * presumably static int get_input_packet(InputFile *f, AVPacket *pkt). */
{
    /* Rate emulation (-re): do not deliver a packet before its DTS has been
     * reached in wall-clock time. */
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4114 
4115 static int got_eagain(void)
4116 {
4117  int i;
4118  for (i = 0; i < nb_output_streams; i++)
4119  if (output_streams[i]->unavailable)
4120  return 1;
4121  return 0;
4122 }
4123 
4124 static void reset_eagain(void)
4125 {
4126  int i;
4127  for (i = 0; i < nb_input_files; i++)
4128  input_files[i]->eagain = 0;
4129  for (i = 0; i < nb_output_streams; i++)
4130  output_streams[i]->unavailable = 0;
4131 }
4132 
4133 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4134 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4135  AVRational time_base)
4136 {
4137  int ret;
4138 
4139  if (!*duration) {
4140  *duration = tmp;
4141  return tmp_time_base;
4142  }
4143 
4144  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4145  if (ret < 0) {
4146  *duration = tmp;
4147  return tmp_time_base;
4148  }
4149 
4150  return time_base;
4151 }
4152 
/* Seek an input file back to its start for -stream_loop and update the
 * file-level duration/time base so looped timestamps can be offset.
 * NOTE(review): the function signature line and two duration-accumulating
 * statements (in the audio and avg_frame_rate branches) are missing from
 * this listing — confirm against the full source. */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* First pass: detect whether any audio stream produced samples. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* Second pass: compute each stream's total duration and keep the
     * maximum as the file duration. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

            } else {
                continue;
            }
        } else {
            /* Estimate the last video frame's length from the frame rate. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
            } else {
                duration = 1;
            }
        }
        /* The first stream initializes the file-level time base. */
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* One loop iteration consumed (-stream_loop count). */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4211 
4212 /*
4213  * Return
4214  * - 0 -- one packet was read and processed
4215  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4216  * this function should be called again
4217  * - AVERROR_EOF -- this function should not be called again
4218  */
4219 static int process_input(int file_index)
4220 {
4221  InputFile *ifile = input_files[file_index];
4223  InputStream *ist;
4224  AVPacket pkt;
4225  int ret, thread_ret, i, j;
4226  int64_t duration;
4227  int64_t pkt_dts;
4228  int disable_discontinuity_correction = copy_ts;
4229 
4230  is = ifile->ctx;
4232 
4233  if (ret == AVERROR(EAGAIN)) {
4234  ifile->eagain = 1;
4235  return ret;
4236  }
4237  if (ret < 0 && ifile->loop) {
4238  AVCodecContext *avctx;
4239  for (i = 0; i < ifile->nb_streams; i++) {
4240  ist = input_streams[ifile->ist_index + i];
4241  avctx = ist->dec_ctx;
4242  if (ist->decoding_needed) {
4243  ret = process_input_packet(ist, NULL, 1);
4244  if (ret>0)
4245  return 0;
4246  avcodec_flush_buffers(avctx);
4247  }
4248  }
4249 #if HAVE_THREADS
4250  free_input_thread(file_index);
4251 #endif
4252  ret = seek_to_start(ifile, is);
4253 #if HAVE_THREADS
4254  thread_ret = init_input_thread(file_index);
4255  if (thread_ret < 0)
4256  return thread_ret;
4257 #endif
4258  if (ret < 0)
4259  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4260  else
4262  if (ret == AVERROR(EAGAIN)) {
4263  ifile->eagain = 1;
4264  return ret;
4265  }
4266  }
4267  if (ret < 0) {
4268  if (ret != AVERROR_EOF) {
4269  print_error(is->url, ret);
4270  if (exit_on_error)
4271  exit_program(1);
4272  }
4273 
4274  for (i = 0; i < ifile->nb_streams; i++) {
4275  ist = input_streams[ifile->ist_index + i];
4276  if (ist->decoding_needed) {
4277  ret = process_input_packet(ist, NULL, 0);
4278  if (ret>0)
4279  return 0;
4280  }
4281 
4282  /* mark all outputs that don't go through lavfi as finished */
4283  for (j = 0; j < nb_output_streams; j++) {
4285 
4286  if (ost->source_index == ifile->ist_index + i &&
4287  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4289  }
4290  }
4291 
4292  ifile->eof_reached = 1;
4293  return AVERROR(EAGAIN);
4294  }
4295 
4296  reset_eagain();
4297 
4298  if (do_pkt_dump) {
4300  is->streams[pkt.stream_index]);
4301  }
4302  /* the following test is needed in case new streams appear
4303  dynamically in stream : we ignore them */
4304  if (pkt.stream_index >= ifile->nb_streams) {
4305  report_new_stream(file_index, &pkt);
4306  goto discard_packet;
4307  }
4308 
4309  ist = input_streams[ifile->ist_index + pkt.stream_index];
4310 
4311  ist->data_size += pkt.size;
4312  ist->nb_packets++;
4313 
4314  if (ist->discard)
4315  goto discard_packet;
4316 
4317  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4319  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4320  if (exit_on_error)
4321  exit_program(1);
4322  }
4323 
4324  if (debug_ts) {
4325  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4326  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4334  }
4335 
4336  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4337  int64_t stime, stime2;
4338  // Correcting starttime based on the enabled streams
4339  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4340  // so we instead do it here as part of discontinuity handling
4341  if ( ist->next_dts == AV_NOPTS_VALUE
4342  && ifile->ts_offset == -is->start_time
4343  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4344  int64_t new_start_time = INT64_MAX;
4345  for (i=0; i<is->nb_streams; i++) {
4346  AVStream *st = is->streams[i];
4347  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4348  continue;
4349  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4350  }
4351  if (new_start_time > is->start_time) {
4352  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4353  ifile->ts_offset = -new_start_time;
4354  }
4355  }
4356 
4357  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4358  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4359  ist->wrap_correction_done = 1;
4360 
4361  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4362  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4363  ist->wrap_correction_done = 0;
4364  }
4365  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4366  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4367  ist->wrap_correction_done = 0;
4368  }
4369  }
4370 
4371  /* add the stream-global side data to the first packet */
4372  if (ist->nb_packets == 1) {
4373  for (i = 0; i < ist->st->nb_side_data; i++) {
4374  AVPacketSideData *src_sd = &ist->st->side_data[i];
4375  uint8_t *dst_data;
4376 
4377  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4378  continue;
4379 
4380  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4381  continue;
4382 
4383  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4384  if (!dst_data)
4385  exit_program(1);
4386 
4387  memcpy(dst_data, src_sd->data, src_sd->size);
4388  }
4389  }
4390 
4391  if (pkt.dts != AV_NOPTS_VALUE)
4392  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4393  if (pkt.pts != AV_NOPTS_VALUE)
4394  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4395 
4396  if (pkt.pts != AV_NOPTS_VALUE)
4397  pkt.pts *= ist->ts_scale;
4398  if (pkt.dts != AV_NOPTS_VALUE)
4399  pkt.dts *= ist->ts_scale;
4400 
4402  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4404  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4405  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4406  int64_t delta = pkt_dts - ifile->last_ts;
4407  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4409  ifile->ts_offset -= delta;
4411  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4412  delta, ifile->ts_offset);
4414  if (pkt.pts != AV_NOPTS_VALUE)
4416  }
4417  }
4418 
4419  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4420  if (pkt.pts != AV_NOPTS_VALUE) {
4421  pkt.pts += duration;
4422  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4423  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4424  }
4425 
4426  if (pkt.dts != AV_NOPTS_VALUE)
4427  pkt.dts += duration;
4428 
4430 
4431  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4432  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4433  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4434  ist->st->time_base, AV_TIME_BASE_Q,
4436  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4437  disable_discontinuity_correction = 0;
4438  }
4439 
4440  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4442  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4443  !disable_discontinuity_correction) {
4444  int64_t delta = pkt_dts - ist->next_dts;
4445  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4446  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4448  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4449  ifile->ts_offset -= delta;
4451  "timestamp discontinuity for stream #%d:%d "
4452  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4453  ist->file_index, ist->st->index, ist->st->id,
4455  delta, ifile->ts_offset);
4457  if (pkt.pts != AV_NOPTS_VALUE)
4459  }
4460  } else {
4461  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4463  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4465  }
4466  if (pkt.pts != AV_NOPTS_VALUE){
4467  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4468  delta = pkt_pts - ist->next_dts;
4469  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4471  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4473  }
4474  }
4475  }
4476  }
4477 
4478  if (pkt.dts != AV_NOPTS_VALUE)
4479  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4480 
4481  if (debug_ts) {
4482  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4488  }
4489 
4490  sub2video_heartbeat(ist, pkt.pts);
4491 
4492  process_input_packet(ist, &pkt, 0);
4493 
4494 discard_packet:
4495  av_packet_unref(&pkt);
4496 
4497  return 0;
4498 }
4499 
4500 /**
4501  * Perform a step of transcoding for the specified filter graph.
4502  *
4503  * @param[in] graph filter graph to consider
4504  * @param[out] best_ist input stream where a frame would allow to continue
4505  * @return 0 for success, <0 for error
4506  */
4507 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4508 {
4509  int i, ret;
4510  int nb_requests, nb_requests_max = 0;
4511  InputFilter *ifilter;
4512  InputStream *ist;
4513 
4514  *best_ist = NULL;
4516  if (ret >= 0)
4517  return reap_filters(0);
4518 
4519  if (ret == AVERROR_EOF) {
4520  ret = reap_filters(1);
4521  for (i = 0; i < graph->nb_outputs; i++)
4522  close_output_stream(graph->outputs[i]->ost);
4523  return ret;
4524  }
4525  if (ret != AVERROR(EAGAIN))
4526  return ret;
4527 
4528  for (i = 0; i < graph->nb_inputs; i++) {
4529  ifilter = graph->inputs[i];
4530  ist = ifilter->ist;
4531  if (input_files[ist->file_index]->eagain ||
4533  continue;
4534  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4535  if (nb_requests > nb_requests_max) {
4536  nb_requests_max = nb_requests;
4537  *best_ist = ist;
4538  }
4539  }
4540 
4541  if (!*best_ist)
4542  for (i = 0; i < graph->nb_outputs; i++)
4543  graph->outputs[i]->ost->unavailable = 1;
4544 
4545  return 0;
4546 }
4547 
4548 /**
4549  * Run a single step of transcoding.
4550  *
4551  * @return 0 for success, <0 for error
4552  */
4553 static int transcode_step(void)
4554 {
4555  OutputStream *ost;
4556  InputStream *ist = NULL;
4557  int ret;
4558 
4559  ost = choose_output();
4560  if (!ost) {
4561  if (got_eagain()) {
4562  reset_eagain();
4563  av_usleep(10000);
4564  return 0;
4565  }
4566  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4567  return AVERROR_EOF;
4568  }
4569 
4570  if (ost->filter && !ost->filter->graph->graph) {
4571  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4572  ret = configure_filtergraph(ost->filter->graph);
4573  if (ret < 0) {
4574  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4575  return ret;
4576  }
4577  }
4578  }
4579 
4580  if (ost->filter && ost->filter->graph->graph) {
4581  if (!ost->initialized) {
4582  char error[1024] = {0};
4583  ret = init_output_stream(ost, error, sizeof(error));
4584  if (ret < 0) {
4585  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4586  ost->file_index, ost->index, error);
4587  exit_program(1);
4588  }
4589  }
4590  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4591  return ret;
4592  if (!ist)
4593  return 0;
4594  } else if (ost->filter) {
4595  int i;
4596  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4597  InputFilter *ifilter = ost->filter->graph->inputs[i];
4598  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4599  ist = ifilter->ist;
4600  break;
4601  }
4602  }
4603  if (!ist) {
4604  ost->inputs_done = 1;
4605  return 0;
4606  }
4607  } else {
4608  av_assert0(ost->source_index >= 0);
4609  ist = input_streams[ost->source_index];
4610  }
4611 
4612  ret = process_input(ist->file_index);
4613  if (ret == AVERROR(EAGAIN)) {
4614  if (input_files[ist->file_index]->eagain)
4615  ost->unavailable = 1;
4616  return 0;
4617  }
4618 
4619  if (ret < 0)
4620  return ret == AVERROR_EOF ? 0 : ret;
4621 
4622  return reap_filters(0);
4623 }
4624 
4625 /*
4626  * The following code is the main loop of the file converter
4627  */
4628 static int transcode(void)
4629 {
4630  int ret, i;
4631  AVFormatContext *os;
4632  OutputStream *ost;
4633  InputStream *ist;
4634  int64_t timer_start;
4635  int64_t total_packets_written = 0;
4636 
4637  ret = transcode_init();
4638  if (ret < 0)
4639  goto fail;
4640 
4641  if (stdin_interaction) {
4642  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4643  }
4644 
4645  timer_start = av_gettime_relative();
4646 
4647 #if HAVE_THREADS
4648  if ((ret = init_input_threads()) < 0)
4649  goto fail;
4650 #endif
4651 
4652  while (!received_sigterm) {
4653  int64_t cur_time= av_gettime_relative();
4654 
4655  /* if 'q' pressed, exits */
4656  if (stdin_interaction)
4657  if (check_keyboard_interaction(cur_time) < 0)
4658  break;
4659 
4660  /* check if there's any stream where output is still needed */
4661  if (!need_output()) {
4662  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4663  break;
4664  }
4665 
4666  ret = transcode_step();
4667  if (ret < 0 && ret != AVERROR_EOF) {
4668  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4669  break;
4670  }
4671 
4672  /* dump report by using the output first video and audio streams */
4673  print_report(0, timer_start, cur_time);
4674  }
4675 #if HAVE_THREADS
4676  free_input_threads();
4677 #endif
4678 
4679  /* at the end of stream, we must flush the decoder buffers */
4680  for (i = 0; i < nb_input_streams; i++) {
4681  ist = input_streams[i];
4682  if (!input_files[ist->file_index]->eof_reached) {
4683  process_input_packet(ist, NULL, 0);
4684  }
4685  }
4686  flush_encoders();
4687 
4688  term_exit();
4689 
4690  /* write the trailer if needed and close file */
4691  for (i = 0; i < nb_output_files; i++) {
4692  os = output_files[i]->ctx;
4693  if (!output_files[i]->header_written) {
4695  "Nothing was written into output file %d (%s), because "
4696  "at least one of its streams received no packets.\n",
4697  i, os->url);
4698  continue;
4699  }
4700  if ((ret = av_write_trailer(os)) < 0) {
4701  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4702  if (exit_on_error)
4703  exit_program(1);
4704  }
4705  }
4706 
4707  /* dump report by using the first video and audio streams */
4708  print_report(1, timer_start, av_gettime_relative());
4709 
4710  /* close each encoder */
4711  for (i = 0; i < nb_output_streams; i++) {
4712  ost = output_streams[i];
4713  if (ost->encoding_needed) {
4714  av_freep(&ost->enc_ctx->stats_in);
4715  }
4716  total_packets_written += ost->packets_written;
4717  if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4718  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4719  exit_program(1);
4720  }
4721  }
4722 
4723  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4724  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4725  exit_program(1);
4726  }
4727 
4728  /* close each decoder */
4729  for (i = 0; i < nb_input_streams; i++) {
4730  ist = input_streams[i];
4731  if (ist->decoding_needed) {
4732  avcodec_close(ist->dec_ctx);
4733  if (ist->hwaccel_uninit)
4734  ist->hwaccel_uninit(ist->dec_ctx);
4735  }
4736  }
4737 
4739 
4740  /* finished ! */
4741  ret = 0;
4742 
4743  fail:
4744 #if HAVE_THREADS
4745  free_input_threads();
4746 #endif
4747 
4748  if (output_streams) {
4749  for (i = 0; i < nb_output_streams; i++) {
4750  ost = output_streams[i];
4751  if (ost) {
4752  if (ost->logfile) {
4753  if (fclose(ost->logfile))
4755  "Error closing logfile, loss of information possible: %s\n",
4756  av_err2str(AVERROR(errno)));
4757  ost->logfile = NULL;
4758  }
4759  av_freep(&ost->forced_kf_pts);
4760  av_freep(&ost->apad);
4762  av_dict_free(&ost->encoder_opts);
4763  av_dict_free(&ost->sws_dict);
4764  av_dict_free(&ost->swr_opts);
4765  av_dict_free(&ost->resample_opts);
4766  }
4767  }
4768  }
4769  return ret;
4770 }
4771 
4773 {
4774  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4775 #if HAVE_GETRUSAGE
4776  struct rusage rusage;
4777 
4778  getrusage(RUSAGE_SELF, &rusage);
4779  time_stamps.user_usec =
4780  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4781  time_stamps.sys_usec =
4782  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4783 #elif HAVE_GETPROCESSTIMES
4784  HANDLE proc;
4785  FILETIME c, e, k, u;
4786  proc = GetCurrentProcess();
4787  GetProcessTimes(proc, &c, &e, &k, &u);
4788  time_stamps.user_usec =
4789  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4790  time_stamps.sys_usec =
4791  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4792 #else
4793  time_stamps.user_usec = time_stamps.sys_usec = 0;
4794 #endif
4795  return time_stamps;
4796 }
4797 
4798 static int64_t getmaxrss(void)
4799 {
4800 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4801  struct rusage rusage;
4802  getrusage(RUSAGE_SELF, &rusage);
4803  return (int64_t)rusage.ru_maxrss * 1024;
4804 #elif HAVE_GETPROCESSMEMORYINFO
4805  HANDLE proc;
4806  PROCESS_MEMORY_COUNTERS memcounters;
4807  proc = GetCurrentProcess();
4808  memcounters.cb = sizeof(memcounters);
4809  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4810  return memcounters.PeakPagefileUsage;
4811 #else
4812  return 0;
4813 #endif
4814 }
4815 
/* No-op log callback installed in daemon mode (-d) to silence all output. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4819 
4820 int main(int argc, char **argv)
4821 {
4822  int i, ret;
4824 
4825  init_dynload();
4826 
4828 
4829  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4830 
4832  parse_loglevel(argc, argv, options);
4833 
4834  if(argc>1 && !strcmp(argv[1], "-d")){
4835  run_as_daemon=1;
4837  argc--;
4838  argv++;
4839  }
4840 
4841 #if CONFIG_AVDEVICE
4843 #endif
4845 
4846  show_banner(argc, argv, options);
4847 
4848  /* parse options and open all input/output files */
4849  ret = ffmpeg_parse_options(argc, argv);
4850  if (ret < 0)
4851  exit_program(1);
4852 
4853  if (nb_output_files <= 0 && nb_input_files == 0) {
4854  show_usage();
4855  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4856  exit_program(1);
4857  }
4858 
4859  /* file converter / grab */
4860  if (nb_output_files <= 0) {
4861  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4862  exit_program(1);
4863  }
4864 
4865  for (i = 0; i < nb_output_files; i++) {
4866  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4867  want_sdp = 0;
4868  }
4869 
4871  if (transcode() < 0)
4872  exit_program(1);
4873  if (do_benchmark) {
4874  int64_t utime, stime, rtime;
4876  utime = current_time.user_usec - ti.user_usec;
4877  stime = current_time.sys_usec - ti.sys_usec;
4878  rtime = current_time.real_usec - ti.real_usec;
4880  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4881  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4882  }
4883  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4886  exit_program(69);
4887 
4889  return main_return_code;
4890 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
AVSubtitle
Definition: avcodec.h:2694
print_sdp
static void print_sdp(void)
Definition: ffmpeg.c:2730
avcodec_close
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1143
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:347
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:247
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
AVCodec
AVCodec.
Definition: codec.h:190
pthread_join
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
ifilter_parameters_from_codecpar
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1834
need_output
static int need_output(void)
Definition: ffmpeg.c:3798
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:152
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1963
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:1345
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
level
uint8_t level
Definition: svq3.c:210
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:413
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:159
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:428
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:4124
InputStream::hwaccel_device
char * hwaccel_device
Definition: ffmpeg.h:366
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
seek_to_start
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4153
AVOutputFormat::name
const char * name
Definition: avformat.h:491
VSYNC_PASSTHROUGH
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
r
const char * r
Definition: vf_curves.c:114
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:150
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2699
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1011
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:345
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:778
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
forced_keyframes_const_names
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:42
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
video_sync_method
int video_sync_method
Definition: ffmpeg_opt.c:153
InputFilter::width
int width
Definition: ffmpeg.h:246
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1262
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
AVCodecHWConfig::methods
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:439
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1186
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:427
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:283
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1564
AVCodecContext::thread_safe_callbacks
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:1814
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:380
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:465
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:288
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:833
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:920
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:117
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:280
InputFile::nb_streams_warn
int nb_streams_warn
Definition: ffmpeg.h:411
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2127
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:649
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:154
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:303
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:462
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4553
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:125
AVSubtitleRect
Definition: avcodec.h:2659
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2698
AV_DISPOSITION_DEFAULT
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:810
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:144
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:211
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AV_THREAD_MESSAGE_NONBLOCK
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1403
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
AVFrame::width
int width
Definition: frame.h:358
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:513
AVPacketSideData
Definition: packet.h:298
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:209
w
uint8_t w
Definition: llviddspenc.c:38
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:978
FKF_T
@ FKF_T
Definition: ffmpeg.h:429
AVPacket::data
uint8_t * data
Definition: packet.h:355
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:58
finish_output_stream
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1395
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:452
AVOption
AVOption.
Definition: opt.h:246
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
FilterGraph::index
int index
Definition: ffmpeg.h:282
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:938
VSYNC_VSCFR
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
AVStream::cur_dts
int64_t cur_dts
Definition: avformat.h:1068
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:359
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AVFrame::pkt_duration
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:579
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
transcode
static int transcode(void)
Definition: ffmpeg.c:4628
VSYNC_AUTO
#define VSYNC_AUTO
Definition: ffmpeg.h:49
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:126
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:143
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:3261
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:900
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:289
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:373
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
max
#define max(a, b)
Definition: cuda_runtime.h:33
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:537
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
OutputFilter::sample_rates
int * sample_rates
Definition: ffmpeg.h:278
check_recording_time
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:887
VSYNC_CFR
#define VSYNC_CFR
Definition: ffmpeg.h:51
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1780
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2260
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
InputStream::hwaccel_get_buffer
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:299
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1847
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
ost
static AVStream * ost
Definition: vaapi_transcode.c:45
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
sample_rate
sample_rate
Definition: ffmpeg_filter.c:192
get_input_packet
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4095
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:167
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:328
AVBSFContext
The bitstream filter state.
Definition: bsf.h:49
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:188
want_sdp
static int want_sdp
Definition: ffmpeg.c:140
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
InputFilter::channel_layout
uint64_t channel_layout
Definition: ffmpeg.h:251
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:236
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1043
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:395
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
InputStream
Definition: ffmpeg.h:294
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:2069
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:163
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4498
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:4115
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2184
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1613
nb_frames_drop
static int nb_frames_drop
Definition: ffmpeg.c:137
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1085
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:148
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
fifo.h
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:39
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1141
finish
static void finish(void)
Definition: movenc.c:345
avcodec_parameters_free
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2098
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:173
hwaccels
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:133
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:123
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:239
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:331
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1260
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
check_init_output_file
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2949
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:318
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
AVChapter
Definition: avformat.h:1292
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:382
AV_DISPOSITION_FORCED
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:822
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:647
av_thread_message_queue_recv
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
Definition: threadmessage.c:172
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:303
InputFilter::frame_queue
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:241
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
avcodec_copy_context
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:216
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:411
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:914
FFMIN3
#define FFMIN3(a, b, c)
Definition: common.h:97
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
AV_FIELD_TB
@ AV_FIELD_TB
Definition: codec_par.h:41
OutputFile::opts
AVDictionary * opts
Definition: ffmpeg.h:556
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:346
loop
static int loop
Definition: ffplay.c:341
do_pkt_dump
int do_pkt_dump
Definition: ffmpeg_opt.c:159
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5059
InputFile
Definition: ffmpeg.h:393
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3024
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1124
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:509
AVCodecContext::get_buffer2
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1341
AV_DISPOSITION_CLEAN_EFFECTS
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:825
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:491
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
OutputFile::shortest
int shortest
Definition: ffmpeg.h:562
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:312
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:434
AV_PKT_FLAG_CORRUPT
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:389
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:600
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:156
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3828
AVStream::first_dts
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1067
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:124
media_type_string
#define media_type_string
Definition: cmdutils.h:617
duration
int64_t duration
Definition: movenc.c:63
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5561
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1655
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:61
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
init_output_stream
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3420
input_streams
InputStream ** input_streams
Definition: ffmpeg.c:147
llrintf
#define llrintf(x)
Definition: libm.h:399
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:325
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:592
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4772
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:290
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:144
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2096
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:839
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:332
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:606
AVFormatContext::chapters
AVChapter ** chapters
Definition: avformat.h:1565
AVDictionaryEntry::key
char * key
Definition: dict.h:82
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:439
frame_size
int frame_size
Definition: mxfenc.c:2137
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:217
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:658
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
InputFilter
Definition: ffmpeg.h:234
get_input_stream
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2936
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:649
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:395
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1980
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:675
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:260
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AVPacketSideData::data
uint8_t * data
Definition: packet.h:299
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:440
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::sub2video::sub_queue
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:348
abort_codec_experimental
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:667
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:358
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:541
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:170
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:434
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2700
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
OutputFile::header_written
int header_written
Definition: ffmpeg.h:564
on
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:336
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:516
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
compare_int64
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2943
InputStream::hwaccel_retrieve_data
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:477
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:237
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
key
const char * key
Definition: hwcontext_opencl.c:168
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
av_fifo_realloc2
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AV_FIELD_BT
@ AV_FIELD_BT
Definition: codec_par.h:42
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:658
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:76
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2548
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:871
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:4219
avformat_write_header
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:505
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
int32_t
int32_t
Definition: audio_convert.c:194
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1660
arg
const char * arg
Definition: jacosubdec.c:66
pthread_create
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
avio_flush
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:233
AVCodecParserContext::repeat_pict
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:3371
output_streams
OutputStream ** output_streams
Definition: ffmpeg.c:152
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4507
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
InputStream::pts
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
AVFormatContext
Format I/O context.
Definition: avformat.h:1335
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1012
run_as_daemon
static int run_as_daemon
Definition: ffmpeg.c:134
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1518
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:262
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:894
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
InputStream::sub2video::w
int w
Definition: ffmpeg.h:350
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
HWAccel::id
enum HWAccelID id
Definition: ffmpeg.h:69
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
InputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:333
InputStream::st
AVStream * st
Definition: ffmpeg.h:296
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4820
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:672
avio_print
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:594
HWAccel
Definition: ffmpeg.h:66
AVCodec::type
enum AVMediaType type
Definition: codec.h:203
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2234
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2322
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:172
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4798
InputStream::next_pts
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:314
frame_bits_per_raw_sample
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:169
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: packet.h:301
AV_DISPOSITION_COMMENT
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:813
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:393
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3867
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1377
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
src
#define src
Definition: vp8dsp.c:254
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
parseutils.h
InputStream::hwaccel_id
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
InputFilter::channels
int channels
Definition: ffmpeg.h:250
mathops.h
duration_max
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4134
InputStream::dec
AVCodec * dec
Definition: ffmpeg.h:304
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1261
main_return_code
static int main_return_code
Definition: ffmpeg.c:346
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:112
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:929
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:255
InputStream::fix_sub_duration
int fix_sub_duration
Definition: ffmpeg.h:338
AV_DISPOSITION_METADATA
#define AV_DISPOSITION_METADATA
Definition: avformat.h:847
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:122
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3625
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1397
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2775
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:565
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
time.h
close_all_output_streams
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:693
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:343
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:157
AV_DISPOSITION_ORIGINAL
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:812
nb_input_streams
int nb_input_streams
Definition: ffmpeg.c:148
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:614
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:663
InputStream::min_pts
int64_t min_pts
Definition: ffmpeg.h:320
HWAccel::name
const char * name
Definition: ffmpeg.h:67
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:291
swresample.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
InputStream::sub2video::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:351
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:170
AVFormatContext::oformat
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1354
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
input_files
InputFile ** input_files
Definition: ffmpeg.c:149
AVStream::nb_frames
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:916
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
InputStream::frames_decoded
uint64_t frames_decoded
Definition: ffmpeg.h:384
OutputFilter::formats
int * formats
Definition: ffmpeg.h:276
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:311
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:564
FilterGraph
Definition: ffmpeg.h:281
print_stats
int print_stats
Definition: ffmpeg_opt.c:166
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1391
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:235
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:510
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
VSYNC_VFR
#define VSYNC_VFR
Definition: ffmpeg.h:52
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:484
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:649
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:255
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1623
options
const OptionDef options[]
AV_DISPOSITION_CAPTIONS
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:845
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1545
desc
const char * desc
Definition: nvenc.c:79
AVIOContext
Bytestream IO Context.
Definition: avio.h:161
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
InputStream::hwaccel_device_type
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:305
AVPacket::size
int size
Definition: packet.h:356
InputStream::wrap_correction_done
int wrap_correction_done
Definition: ffmpeg.h:316
InputStream::start
int64_t start
Definition: ffmpeg.h:308
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
InputStream::filter_frame
AVFrame * filter_frame
Definition: ffmpeg.h:306
threadmessage.h
InputStream::file_index
int file_index
Definition: ffmpeg.h:295
output_files
OutputFile ** output_files
Definition: ffmpeg.c:154
parse_forced_key_frames
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3179
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:423
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:342
start_time
static int64_t start_time
Definition: ffplay.c:332
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:285
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1431
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2532
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
InputStream::got_output
int got_output
Definition: ffmpeg.h:340
AVCodec::defaults
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: codec.h:257
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
size
int size
Definition: twinvq_data.h:11134
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:160
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
subtitle_out
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:162
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1317
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2550
InputStream::prev_sub
struct InputStream::@2 prev_sub
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
InputStream::hwaccel_retrieved_pix_fmt
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:516
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5843
OutputStream::source_index
int source_index
Definition: ffmpeg.h:446
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:300
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
sub2video_update
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:458
AV_DISPOSITION_DUB
#define AV_DISPOSITION_DUB
Definition: avformat.h:811
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:927
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2697
avdevice.h
AVFMT_NOSTREAMS
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:467
AV_DISPOSITION_HEARING_IMPAIRED
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:823
OSTFinished
OSTFinished
Definition: ffmpeg.h:438
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:813
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:213
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:370
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
InputStream::samples_decoded
uint64_t samples_decoded
Definition: ffmpeg.h:385
OutputFile::limit_filesize
uint64_t limit_filesize
Definition: ffmpeg.h:560
dup_warning
static unsigned dup_warning
Definition: ffmpeg.c:136
AVPacketSideData::size
int size
Definition: packet.h:300
av_sdp_create
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:842
InputStream::max_pts
int64_t max_pts
Definition: ffmpeg.h:321
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:671
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:156
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
get_buffer
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2861
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:712
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2107
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2642
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
av_thread_message_queue_alloc
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:1187
decode
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2211
AVStream::side_data
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:967
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
guess_input_channel_layout
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2057
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1184
write_packet
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:702
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:960
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:128
do_video_stats
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1350
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1206
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:60
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:447
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
InputStream::guess_layout_max
int guess_layout_max
Definition: ffmpeg.h:334
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1649
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:586
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1170
av_write_trailer
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1251
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:366
bprint.h
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:301
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:223
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
InputStream::ret
int ret
Definition: ffmpeg.h:341
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:525
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:312
internal.h
AVCodecParameters::height
int height
Definition: codec_par.h:127
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2184
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: codec_par.h:177
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:155
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
av_thread_message_queue_set_err_send
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
Definition: threadmessage.c:188
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:164
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:349
OutputFile::ost_index
int ost_index
Definition: ffmpeg.h:557
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:457
InputStream::hwaccel_uninit
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
AV_DISPOSITION_KARAOKE
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:815
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
av_get_audio_frame_duration
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1799
transcode_subtitles
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2455
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1257
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
AV_DISPOSITION_DEPENDENT
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:848
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:489
AVCodecContext::height
int height
Definition: avcodec.h:699
OutputFilter::channel_layouts
uint64_t * channel_layouts
Definition: ffmpeg.h:277
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
nb_output_files
int nb_output_files
Definition: ffmpeg.c:155
AVCodecParameters::field_order
enum AVFieldOrder field_order
Video only.
Definition: codec_par.h:141
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:503
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFMT_TS_NONSTRICT
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:472
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:918
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
AV_DISPOSITION_VISUAL_IMPAIRED
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:824
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:872
AVFrame::decode_error_flags
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:595
AVFMT_FLAG_BITEXACT
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1483
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:865
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:165
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1080
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
output_packet
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:861
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:238
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
normalize.ifile
ifile
Definition: normalize.py:6
sdp_filename
char * sdp_filename
Definition: ffmpeg_opt.c:145
AVStream::nb_side_data
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:971
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
AVCodecContext::opaque
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:568
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:361
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3266
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:274
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:396
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:388
InputFile::ist_index
int ist_index
Definition: ffmpeg.h:397
AV_DISPOSITION_DESCRIPTIONS
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:846
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:366
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:197
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:88
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:249
HWAccel::init
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:68
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5071
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1183
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodecContext
main external API structure.
Definition: avcodec.h:526
avcodec_parameters_copy
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2109
AVFrame::height
int height
Definition: frame.h:358
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:866
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:344
BenchmarkTimeStamps
Definition: ffmpeg.c:123
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5781
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:419
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
InputFilter::format
int format
Definition: ffmpeg.h:244
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
OutputStream::finished
OSTFinished finished
Definition: ffmpeg.h:512
report_new_stream
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3610
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
av_stream_get_end_pts
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:136
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2077
avfilter.h
avformat_free_context
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4455
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:989
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
InputStream::nb_samples
int64_t nb_samples
Definition: ffmpeg.h:327
InputFilter::height
int height
Definition: ffmpeg.h:246
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:357
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1848
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:404
InputStream::discard
int discard
Definition: ffmpeg.h:297
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1635
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
OutputFilter
Definition: ffmpeg.h:258
nb_frames_dup
static int nb_frames_dup
Definition: ffmpeg.c:135
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:349
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:534
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:287
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
audio_volume
int audio_volume
Definition: ffmpeg_opt.c:151
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
OutputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:555
OutputFilter::out_tmp
AVFilterInOut * out_tmp
Definition: ffmpeg.h:265
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:225
AVCodecParameters::video_delay
int video_delay
Video only.
Definition: codec_par.h:155
av_fifo_size
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
InputStream::sub2video::h
int h
Definition: ffmpeg.h:350
llrint
#define llrint(x)
Definition: libm.h:394
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3138
AVCodecParameters::format
int format
Definition: codec_par.h:84
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:253
InputStream::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
av_fifo_freep
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:426
AVDictionaryEntry
Definition: dict.h:81
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:347
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:168
do_hex_dump
int do_hex_dump
Definition: ffmpeg_opt.c:158
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
InputStream::ts_scale
double ts_scale
Definition: ffmpeg.h:329
AVPacket
This structure stores compressed data.
Definition: packet.h:332
av_thread_message_queue_free
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
av_interleaved_write_frame
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1236
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
init_input_stream
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2871
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:375
InputFile::input_ts_offset
int64_t input_ts_offset
Definition: ffmpeg.h:402
AVCodecParameters::channel_layout
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
InputStream::dts_buffer
int64_t * dts_buffer
Definition: ffmpeg.h:387
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:158
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
AV_OPT_TYPE_FLAGS
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:222
av_fifo_alloc
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
imgutils.h
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:132
timestamp.h
OutputStream
Definition: muxing.c:53
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
hwcontext.h
av_thread_message_queue_set_err_recv
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
Definition: threadmessage.c:199
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1287
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
codec_flags
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:149
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
av_stream_get_parser
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:144
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: codec.h:425
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
av_pkt_dump_log2
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:114
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3394
InputStream::nb_dts_buffer
int nb_dts_buffer
Definition: ffmpeg.h:388
InputStream::saw_first_ts
int saw_first_ts
Definition: ffmpeg.h:330
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:904
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3855
avstring.h
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:409
FKF_N
@ FKF_N
Definition: ffmpeg.h:425
AVStream::pts_wrap_bits
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1057
log_callback_null
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4816
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
read_key
static int read_key(void)
Definition: ffmpeg.c:433
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1414
VSYNC_DROP
#define VSYNC_DROP
Definition: ffmpeg.h:54
buffersrc.h
AVCodecHWConfig::device_type
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:446
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:342
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
init_output_bsfs
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2995
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2696
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1215
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3242
dec_ctx
static AVCodecContext * dec_ctx
Definition: filtering_audio.c:43
nb_output_streams
int nb_output_streams
Definition: ffmpeg.c:153
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
OutputFile
Definition: ffmpeg.h:554
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
AV_DISPOSITION_LYRICS
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:814
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:336
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
FF_API_DEBUG_MV
#define FF_API_DEBUG_MV
Definition: version.h:58