FFmpeg
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
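/*
 * Editor's note (illustrative, not part of the original source): the pipeline
 * implemented below is what runs for a typical command line such as
 *
 *     ffmpeg -i input.mkv -c:v libx264 -b:v 2M -c:a aac output.mp4
 *
 * i.e. demux and decode input.mkv, pass frames through optional filter graphs,
 * re-encode video with libx264 and audio with AAC, and mux the result into
 * output.mp4.
 */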
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
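/* Editor's note: these are the variable names made available to the expression
   form of the -force_key_frames option; they are bound per frame where
   ost->forced_keyframes_pexpr is evaluated in do_video_out() below. */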
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
127 } BenchmarkTimeStamps;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
146 static uint8_t *subtitle_out;
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
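/* Editor's note: in outline, sub2video keeps one RGB32 canvas frame per subtitle
   input (sub2video_get_blank_frame), paints each decoded bitmap rectangle into it
   through its palette (sub2video_copy_rect), and pushes the canvas into the
   stream's buffer sources when a new subtitle arrives (sub2video_update) or as
   demuxing progresses (sub2video_heartbeat), so overlay filters always have a
   frame to consume. */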
176 
177 static int sub2video_get_blank_frame(InputStream *ist)
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Error flushing frame to the buffer source.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
340  term_exit_sigsafe();
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
351 sigterm_handler(int sig)
352 {
353  int ret;
354  received_sigterm = sig;
355  received_nb_signals++;
356  term_exit_sigsafe();
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end up here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
537  av_fifo_freep(&ifilter->frame_queue);
538  if (ist->sub2video.sub_queue) {
539  while (av_fifo_size(ist->sub2video.sub_queue)) {
540  AVSubtitle sub;
541  av_fifo_generic_read(ist->sub2video.sub_queue,
542  &sub, sizeof(sub), NULL);
544  }
545  av_fifo_freep(&ist->sub2video.sub_queue);
546  }
547  av_buffer_unref(&ifilter->hw_frames_ctx);
548  av_freep(&ifilter->name);
549  av_freep(&fg->inputs[j]);
550  }
551  av_freep(&fg->inputs);
552  for (j = 0; j < fg->nb_outputs; j++) {
553  OutputFilter *ofilter = fg->outputs[j];
554 
555  avfilter_inout_free(&ofilter->out_tmp);
556  av_freep(&ofilter->name);
557  av_freep(&ofilter->formats);
558  av_freep(&ofilter->channel_layouts);
559  av_freep(&ofilter->sample_rates);
560  av_freep(&fg->outputs[j]);
561  }
562  av_freep(&fg->outputs);
563  av_freep(&fg->graph_desc);
564 
566  }
568 
570 
571  /* close files */
572  for (i = 0; i < nb_output_files; i++) {
573  OutputFile *of = output_files[i];
575  if (!of)
576  continue;
577  s = of->ctx;
578  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
579  avio_closep(&s->pb);
581  av_dict_free(&of->opts);
582 
584  }
585  for (i = 0; i < nb_output_streams; i++) {
587 
588  if (!ost)
589  continue;
590 
592 
597 
600  av_freep(&ost->avfilter);
602 
605 
608 
611 
612  if (ost->muxing_queue) {
613  while (av_fifo_size(ost->muxing_queue)) {
614  AVPacket *pkt;
617  }
619  }
620 
622  }
623 #if HAVE_THREADS
624  free_input_threads();
625 #endif
626  for (i = 0; i < nb_input_files; i++) {
630  }
631  for (i = 0; i < nb_input_streams; i++) {
633 
634  av_frame_free(&ist->decoded_frame);
635  av_frame_free(&ist->filter_frame);
636  av_packet_free(&ist->pkt);
637  av_dict_free(&ist->decoder_opts);
638  avsubtitle_free(&ist->prev_sub.subtitle);
639  av_frame_free(&ist->sub2video.frame);
640  av_freep(&ist->filters);
641  av_freep(&ist->hwaccel_device);
642  av_freep(&ist->dts_buffer);
643 
644  avcodec_free_context(&ist->dec_ctx);
645 
647  }
648 
649  if (vstats_file) {
650  if (fclose(vstats_file))
652  "Error closing vstats file, loss of information possible: %s\n",
653  av_err2str(AVERROR(errno)));
654  }
656 
661 
662  uninit_opts();
663 
665 
666  if (received_sigterm) {
667  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
668  (int) received_sigterm);
669  } else if (ret && atomic_load(&transcode_init_done)) {
670  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
671  }
672  term_exit();
673  ffmpeg_exited = 1;
674 }
675 
676 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
677 {
678  AVDictionaryEntry *t = NULL;
679 
680  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
682  }
683 }
684 
685 static void assert_avoptions(AVDictionary *m)
686 {
688  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
689  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
690  exit_program(1);
691  }
692 }
693 
694 static void abort_codec_experimental(const AVCodec *c, int encoder)
695 {
696  exit_program(1);
697 }
698 
699 static void update_benchmark(const char *fmt, ...)
700 {
701  if (do_benchmark_all) {
703  va_list va;
704  char buf[1024];
705 
706  if (fmt) {
707  va_start(va, fmt);
708  vsnprintf(buf, sizeof(buf), fmt, va);
709  va_end(va);
711  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
714  t.real_usec - current_time.real_usec, buf);
715  }
716  current_time = t;
717  }
718 }
719 
720 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
721 {
722  int i;
723  for (i = 0; i < nb_output_streams; i++) {
724  OutputStream *ost2 = output_streams[i];
725  ost2->finished |= ost == ost2 ? this_stream : others;
726  }
727 }
728 
729 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
730 {
731  AVFormatContext *s = of->ctx;
732  AVStream *st = ost->st;
733  int ret;
734 
735  /*
736  * Audio encoders may split the packets -- #frames in != #packets out.
737  * But there is no reordering, so we can limit the number of output packets
738  * by simply dropping them here.
739  * Counting encoded video frames needs to be done separately because of
740  * reordering, see do_video_out().
741  * Do not count the packet when unqueued because it has been counted when queued.
742  */
743  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
744  if (ost->frame_number >= ost->max_frames) {
746  return;
747  }
748  ost->frame_number++;
749  }
750 
751  if (!of->header_written) {
752  AVPacket *tmp_pkt;
753  /* the muxer is not initialized yet, buffer the packet */
754  if (!av_fifo_space(ost->muxing_queue)) {
755  unsigned int are_we_over_size =
757  int new_size = are_we_over_size ?
761 
762  if (new_size <= av_fifo_size(ost->muxing_queue)) {
764  "Too many packets buffered for output stream %d:%d.\n",
765  ost->file_index, ost->st->index);
766  exit_program(1);
767  }
768  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
769  if (ret < 0)
770  exit_program(1);
771  }
773  if (ret < 0)
774  exit_program(1);
775  tmp_pkt = av_packet_alloc();
776  if (!tmp_pkt)
777  exit_program(1);
778  av_packet_move_ref(tmp_pkt, pkt);
779  ost->muxing_queue_data_size += tmp_pkt->size;
780  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
781  return;
782  }
783 
786  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
787 
789  int i;
791  NULL);
792  ost->quality = sd ? AV_RL32(sd) : -1;
793  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
794 
795  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
796  if (sd && i < sd[5])
797  ost->error[i] = AV_RL64(sd + 8 + 8*i);
798  else
799  ost->error[i] = -1;
800  }
801 
802  if (ost->frame_rate.num && ost->is_cfr) {
803  if (pkt->duration > 0)
804  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
806  ost->mux_timebase);
807  }
808  }
809 
811 
812  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
813  if (pkt->dts != AV_NOPTS_VALUE &&
814  pkt->pts != AV_NOPTS_VALUE &&
815  pkt->dts > pkt->pts) {
816  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
817  pkt->dts, pkt->pts,
818  ost->file_index, ost->st->index);
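 /* Editor's note: the sum-minus-min-minus-max expression below selects the
    median of {pts, dts, last_mux_dts + 1}, which is then assigned to both
    timestamps. */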
819  pkt->pts =
820  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
821  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
822  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
823  }
825  pkt->dts != AV_NOPTS_VALUE &&
828  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
829  if (pkt->dts < max) {
830  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
831  if (exit_on_error)
832  loglevel = AV_LOG_ERROR;
833  av_log(s, loglevel, "Non-monotonous DTS in output stream "
834  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
836  if (exit_on_error) {
837  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
838  exit_program(1);
839  }
840  av_log(s, loglevel, "changing to %"PRId64". This may result "
841  "in incorrect timestamps in the output file.\n",
842  max);
843  if (pkt->pts >= pkt->dts)
844  pkt->pts = FFMAX(pkt->pts, max);
845  pkt->dts = max;
846  }
847  }
848  }
849  ost->last_mux_dts = pkt->dts;
850 
851  ost->data_size += pkt->size;
852  ost->packets_written++;
853 
855 
856  if (debug_ts) {
857  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
858  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
862  pkt->size
863  );
864  }
865 
867  if (ret < 0) {
868  print_error("av_interleaved_write_frame()", ret);
869  main_return_code = 1;
871  }
873 }
874 
875 static void close_output_stream(OutputStream *ost)
876 {
878 
880  if (of->shortest) {
882  of->recording_time = FFMIN(of->recording_time, end);
883  }
884 }
885 
886 /*
887  * Send a single packet to the output, applying any bitstream filters
888  * associated with the output stream. This may result in any number
889  * of packets actually being written, depending on what bitstream
890  * filters are applied. The supplied packet is consumed and will be
891  * blank (as if newly-allocated) when this function returns.
892  *
893  * If eof is set, instead indicate EOF to all bitstream filters and
894  * therefore flush any delayed packets to the output. A blank packet
895  * must be supplied in this case.
896  */
897 static void output_packet(OutputFile *of, AVPacket *pkt,
898  OutputStream *ost, int eof)
899 {
900  int ret = 0;
901 
902  /* apply the output bitstream filters */
903  if (ost->bsf_ctx) {
904  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
905  if (ret < 0)
906  goto finish;
907  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
908  write_packet(of, pkt, ost, 0);
909  if (ret == AVERROR(EAGAIN))
910  ret = 0;
911  } else if (!eof)
912  write_packet(of, pkt, ost, 0);
913 
914 finish:
915  if (ret < 0 && ret != AVERROR_EOF) {
916  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
917  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
918  if(exit_on_error)
919  exit_program(1);
920  }
921 }
922 
923 static int check_recording_time(OutputStream *ost)
924 {
926 
927  if (of->recording_time != INT64_MAX &&
929  AV_TIME_BASE_Q) >= 0) {
931  return 0;
932  }
933  return 1;
934 }
935 
936 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
937  AVFrame *frame)
938 {
939  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
940  AVCodecContext *enc = ost->enc_ctx;
941  if (!frame || frame->pts == AV_NOPTS_VALUE ||
942  !enc || !ost->filter || !ost->filter->graph->graph)
943  goto early_exit;
944 
945  {
947 
948  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
950  AVRational tb = enc->time_base;
951  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
952 
953  tb.den <<= extra_bits;
954  float_pts =
955  av_rescale_q(frame->pts, filter_tb, tb) -
957  float_pts /= 1 << extra_bits;
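 /* Editor's note: rescaling to a time base whose denominator was left-shifted
    by extra_bits, then dividing back down, preserves up to 16 extra fractional
    bits in float_pts relative to the integer rescale of frame->pts below. */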
958  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
959  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
960 
961  frame->pts =
962  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
964  }
965 
966 early_exit:
967 
968  if (debug_ts) {
969  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
970  frame ? av_ts2str(frame->pts) : "NULL",
971  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
972  float_pts,
973  enc ? enc->time_base.num : -1,
974  enc ? enc->time_base.den : -1);
975  }
976 
977  return float_pts;
978 }
979 
980 static int init_output_stream(OutputStream *ost, AVFrame *frame,
981  char *error, int error_len);
982 
983 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
984  unsigned int fatal)
985 {
986  int ret = AVERROR_BUG;
987  char error[1024] = {0};
988 
989  if (ost->initialized)
990  return 0;
991 
992  ret = init_output_stream(ost, frame, error, sizeof(error));
993  if (ret < 0) {
994  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
996 
997  if (fatal)
998  exit_program(1);
999  }
1000 
1001  return ret;
1002 }
1003 
1004 static void do_audio_out(OutputFile *of, OutputStream *ost,
1005  AVFrame *frame)
1006 {
1007  AVCodecContext *enc = ost->enc_ctx;
1008  AVPacket *pkt = ost->pkt;
1009  int ret;
1010 
1012 
1013  if (!check_recording_time(ost))
1014  return;
1015 
1016  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1017  frame->pts = ost->sync_opts;
1018  ost->sync_opts = frame->pts + frame->nb_samples;
1019  ost->samples_encoded += frame->nb_samples;
1020  ost->frames_encoded++;
1021 
1023  if (debug_ts) {
1024  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1025  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1026  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1027  enc->time_base.num, enc->time_base.den);
1028  }
1029 
1030  ret = avcodec_send_frame(enc, frame);
1031  if (ret < 0)
1032  goto error;
1033 
1034  while (1) {
1036  ret = avcodec_receive_packet(enc, pkt);
1037  if (ret == AVERROR(EAGAIN))
1038  break;
1039  if (ret < 0)
1040  goto error;
1041 
1042  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1043 
1045 
1046  if (debug_ts) {
1047  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1048  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1051  }
1052 
1053  output_packet(of, pkt, ost, 0);
1054  }
1055 
1056  return;
1057 error:
1058  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1059  exit_program(1);
1060 }
1061 
1062 static void do_subtitle_out(OutputFile *of,
1063  OutputStream *ost,
1064  AVSubtitle *sub)
1065 {
1066  int subtitle_out_max_size = 1024 * 1024;
1067  int subtitle_out_size, nb, i;
1068  AVCodecContext *enc;
1069  AVPacket *pkt = ost->pkt;
1070  int64_t pts;
1071 
1072  if (sub->pts == AV_NOPTS_VALUE) {
1073  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1074  if (exit_on_error)
1075  exit_program(1);
1076  return;
1077  }
1078 
1079  enc = ost->enc_ctx;
1080 
1081  if (!subtitle_out) {
1082  subtitle_out = av_malloc(subtitle_out_max_size);
1083  if (!subtitle_out) {
1084  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1085  exit_program(1);
1086  }
1087  }
1088 
1089  /* Note: DVB subtitles need one packet to draw them and another
1090  packet to clear them */
1091  /* XXX: signal it in the codec context ? */
1092  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1093  nb = 2;
1094  else
1095  nb = 1;
1096 
1097  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1098  pts = sub->pts;
1101  for (i = 0; i < nb; i++) {
1102  unsigned save_num_rects = sub->num_rects;
1103 
1105  if (!check_recording_time(ost))
1106  return;
1107 
1108  sub->pts = pts;
1109  // start_display_time is required to be 0
1110  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1111  sub->end_display_time -= sub->start_display_time;
1112  sub->start_display_time = 0;
1113  if (i == 1)
1114  sub->num_rects = 0;
1115 
1116  ost->frames_encoded++;
1117 
1118  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1119  subtitle_out_max_size, sub);
1120  if (i == 1)
1121  sub->num_rects = save_num_rects;
1122  if (subtitle_out_size < 0) {
1123  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1124  exit_program(1);
1125  }
1126 
1128  pkt->data = subtitle_out;
1129  pkt->size = subtitle_out_size;
1131  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1132  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1133  /* XXX: the pts correction is handled here. Maybe handling
1134  it in the codec would be better */
1135  if (i == 0)
1136  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1137  else
1138  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139  }
1140  pkt->dts = pkt->pts;
1141  output_packet(of, pkt, ost, 0);
1142  }
1143 }
1144 
1145 static void do_video_out(OutputFile *of,
1146  OutputStream *ost,
1147  AVFrame *next_picture)
1148 {
1149  int ret, format_video_sync;
1150  AVPacket *pkt = ost->pkt;
1151  AVCodecContext *enc = ost->enc_ctx;
1152  AVRational frame_rate;
1153  int nb_frames, nb0_frames, i;
1154  double delta, delta0;
1155  double duration = 0;
1156  double sync_ipts = AV_NOPTS_VALUE;
1157  int frame_size = 0;
1158  InputStream *ist = NULL;
1160 
1161  init_output_stream_wrapper(ost, next_picture, 1);
1162  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1163 
1164  if (ost->source_index >= 0)
1166 
1167  frame_rate = av_buffersink_get_frame_rate(filter);
1168  if (frame_rate.num > 0 && frame_rate.den > 0)
1169  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
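 /* Editor's note: this is the nominal duration of one frame (1/fps) expressed
    in encoder time-base units. */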
1170 
1171  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1173 
1174  if (!ost->filters_script &&
1175  !ost->filters &&
1176  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177  next_picture &&
1178  ist &&
1179  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1180  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181  }
1182 
1183  if (!next_picture) {
1184  //end, flushing
1185  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1186  ost->last_nb0_frames[1],
1187  ost->last_nb0_frames[2]);
1188  } else {
1189  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1190  delta = delta0 + duration;
1191 
1192  /* by default, we output a single frame */
1193  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194  nb_frames = 1;
1195 
1196  format_video_sync = video_sync_method;
1197  if (format_video_sync == VSYNC_AUTO) {
1198  if(!strcmp(of->ctx->oformat->name, "avi")) {
1199  format_video_sync = VSYNC_VFR;
1200  } else
1201  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1202  if ( ist
1203  && format_video_sync == VSYNC_CFR
1204  && input_files[ist->file_index]->ctx->nb_streams == 1
1205  && input_files[ist->file_index]->input_ts_offset == 0) {
1206  format_video_sync = VSYNC_VSCFR;
1207  }
1208  if (format_video_sync == VSYNC_CFR && copy_ts) {
1209  format_video_sync = VSYNC_VSCFR;
1210  }
1211  }
1212  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1213 
1214  if (delta0 < 0 &&
1215  delta > 0 &&
1216  format_video_sync != VSYNC_PASSTHROUGH &&
1217  format_video_sync != VSYNC_DROP) {
1218  if (delta0 < -0.6) {
1219  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1220  } else
1221  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1222  sync_ipts = ost->sync_opts;
1223  duration += delta0;
1224  delta0 = 0;
1225  }
1226 
1227  switch (format_video_sync) {
1228  case VSYNC_VSCFR:
1229  if (ost->frame_number == 0 && delta0 >= 0.5) {
1230  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231  delta = duration;
1232  delta0 = 0;
1233  ost->sync_opts = llrint(sync_ipts);
1234  }
1235  case VSYNC_CFR:
1236  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1237  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1238  nb_frames = 0;
1239  } else if (delta < -1.1)
1240  nb_frames = 0;
1241  else if (delta > 1.1) {
1242  nb_frames = lrintf(delta);
1243  if (delta0 > 1.1)
1244  nb0_frames = llrintf(delta0 - 0.6);
1245  }
1246  break;
1247  case VSYNC_VFR:
1248  if (delta <= -0.6)
1249  nb_frames = 0;
1250  else if (delta > 0.6)
1251  ost->sync_opts = llrint(sync_ipts);
1252  break;
1253  case VSYNC_DROP:
1254  case VSYNC_PASSTHROUGH:
1255  ost->sync_opts = llrint(sync_ipts);
1256  break;
1257  default:
1258  av_assert0(0);
1259  }
1260  }
1261 
1262  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1263  nb0_frames = FFMIN(nb0_frames, nb_frames);
1264 
1265  memmove(ost->last_nb0_frames + 1,
1267  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1268  ost->last_nb0_frames[0] = nb0_frames;
1269 
1270  if (nb0_frames == 0 && ost->last_dropped) {
1271  nb_frames_drop++;
1273  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1275  }
1276  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1277  if (nb_frames > dts_error_threshold * 30) {
1278  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1279  nb_frames_drop++;
1280  return;
1281  }
1282  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1283  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1284  if (nb_frames_dup > dup_warning) {
1285  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1286  dup_warning *= 10;
1287  }
1288  }
1289  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1290  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1291 
1292  /* duplicates frame if needed */
1293  for (i = 0; i < nb_frames; i++) {
1294  AVFrame *in_picture;
1295  int forced_keyframe = 0;
1296  double pts_time;
1297 
1298  if (i < nb0_frames && ost->last_frame) {
1299  in_picture = ost->last_frame;
1300  } else
1301  in_picture = next_picture;
1302 
1303  if (!in_picture)
1304  return;
1305 
1306  in_picture->pts = ost->sync_opts;
1307 
1308  if (!check_recording_time(ost))
1309  return;
1310 
1311  in_picture->quality = enc->global_quality;
1312  in_picture->pict_type = 0;
1313 
1315  in_picture->pts != AV_NOPTS_VALUE)
1316  ost->forced_kf_ref_pts = in_picture->pts;
1317 
1318  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1321  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322  ost->forced_kf_index++;
1323  forced_keyframe = 1;
1324  } else if (ost->forced_keyframes_pexpr) {
1325  double res;
1329  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1335  res);
1336  if (res) {
1337  forced_keyframe = 1;
1343  }
1344 
1346  } else if ( ost->forced_keyframes
1347  && !strncmp(ost->forced_keyframes, "source", 6)
1348  && in_picture->key_frame==1
1349  && !i) {
1350  forced_keyframe = 1;
1351  } else if ( ost->forced_keyframes
1352  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1353  && !i) {
1354  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1355  ost->dropped_keyframe = 0;
1356  }
1357 
1358  if (forced_keyframe) {
1359  in_picture->pict_type = AV_PICTURE_TYPE_I;
1360  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1361  }
1362 
1364  if (debug_ts) {
1365  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1366  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1367  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1368  enc->time_base.num, enc->time_base.den);
1369  }
1370 
1371  ost->frames_encoded++;
1372 
1373  ret = avcodec_send_frame(enc, in_picture);
1374  if (ret < 0)
1375  goto error;
1376  // Make sure Closed Captions will not be duplicated
1378 
1379  while (1) {
1381  ret = avcodec_receive_packet(enc, pkt);
1382  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1383  if (ret == AVERROR(EAGAIN))
1384  break;
1385  if (ret < 0)
1386  goto error;
1387 
1388  if (debug_ts) {
1389  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1390  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1393  }
1394 
1396  pkt->pts = ost->sync_opts;
1397 
1399 
1400  if (debug_ts) {
1401  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1402  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1405  }
1406 
1407  frame_size = pkt->size;
1408  output_packet(of, pkt, ost, 0);
1409 
1410  /* if two pass, output log */
1411  if (ost->logfile && enc->stats_out) {
1412  fprintf(ost->logfile, "%s", enc->stats_out);
1413  }
1414  }
1415  ost->sync_opts++;
1416  /*
1417  * For video, number of frames in == number of packets out.
1418  * But there may be reordering, so we can't throw away frames on encoder
1419  * flush, we need to limit them here, before they go into encoder.
1420  */
1421  ost->frame_number++;
1422 
1423  if (vstats_filename && frame_size)
1425  }
1426 
1427  if (!ost->last_frame)
1430  if (next_picture && ost->last_frame)
1431  av_frame_ref(ost->last_frame, next_picture);
1432  else
1434 
1435  return;
1436 error:
1437  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1438  exit_program(1);
1439 }
1440 
1441 static double psnr(double d)
1442 {
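 /* Editor's note: callers pass the squared-error sum normalized by
    width * height * 255^2 (assuming 8-bit samples), so -10*log10(d)
    yields PSNR in dB. */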
1443  return -10.0 * log10(d);
1444 }
1445 
1446 static void do_video_stats(OutputStream *ost, int frame_size)
1447 {
1448  AVCodecContext *enc;
1449  int frame_number;
1450  double ti1, bitrate, avg_bitrate;
1451 
1452  /* this is executed just the first time do_video_stats is called */
1453  if (!vstats_file) {
1454  vstats_file = fopen(vstats_filename, "w");
1455  if (!vstats_file) {
1456  perror("fopen");
1457  exit_program(1);
1458  }
1459  }
1460 
1461  enc = ost->enc_ctx;
1462  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1463  frame_number = ost->st->nb_frames;
1464  if (vstats_version <= 1) {
1465  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1466  ost->quality / (float)FF_QP2LAMBDA);
1467  } else {
1468  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1469  ost->quality / (float)FF_QP2LAMBDA);
1470  }
1471 
1472  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1473  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1474 
1475  fprintf(vstats_file,"f_size= %6d ", frame_size);
1476  /* compute pts value */
1478  if (ti1 < 0.01)
1479  ti1 = 0.01;
1480 
1481  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1482  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1483  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1484  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1485  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1486  }
1487 }
1488 
1490 {
1492  int i;
1493 
1495 
1496  if (of->shortest) {
1497  for (i = 0; i < of->ctx->nb_streams; i++)
1499  }
1500 }
1501 
1502 /**
1503  * Get and encode new output from any of the filtergraphs, without causing
1504  * activity.
1505  *
1506  * @return 0 for success, <0 for severe errors
1507  */
1508 static int reap_filters(int flush)
1509 {
1510  AVFrame *filtered_frame = NULL;
1511  int i;
1512 
1513  /* Reap all buffers present in the buffer sinks */
1514  for (i = 0; i < nb_output_streams; i++) {
1518  AVCodecContext *enc = ost->enc_ctx;
1519  int ret = 0;
1520 
1521  if (!ost->filter || !ost->filter->graph->graph)
1522  continue;
1523  filter = ost->filter->filter;
1524 
1525  /*
1526  * Unlike video, with audio the audio frame size matters.
1527  * Currently we are fully reliant on the lavfi filter chain to
1528  * do the buffering deed for us, and thus the frame size parameter
1529  * needs to be set accordingly. Where does one get the required
1530  * frame size? From the initialized AVCodecContext of an audio
1531  * encoder. Thus, if we have gotten to an audio stream, initialize
1532  * the encoder before receiving its first AVFrame.
1533  */
1536 
1537  if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1538  return AVERROR(ENOMEM);
1539  }
1540  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1541  return AVERROR(ENOMEM);
1542  }
1543  filtered_frame = ost->filtered_frame;
1544 
1545  while (1) {
1546  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1548  if (ret < 0) {
1549  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1551  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1552  } else if (flush && ret == AVERROR_EOF) {
1554  do_video_out(of, ost, NULL);
1555  }
1556  break;
1557  }
1558  if (ost->finished) {
1559  av_frame_unref(filtered_frame);
1560  continue;
1561  }
1562 
1563  switch (av_buffersink_get_type(filter)) {
1564  case AVMEDIA_TYPE_VIDEO:
1565  if (!ost->frame_aspect_ratio.num)
1566  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1567 
1568  do_video_out(of, ost, filtered_frame);
1569  break;
1570  case AVMEDIA_TYPE_AUDIO:
1571  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1572  enc->channels != filtered_frame->channels) {
1574  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1575  break;
1576  }
1577  do_audio_out(of, ost, filtered_frame);
1578  break;
1579  default:
1580  // TODO support subtitle filters
1581  av_assert0(0);
1582  }
1583 
1584  av_frame_unref(filtered_frame);
1585  }
1586  }
1587 
1588  return 0;
1589 }
1590 
1591 static void print_final_stats(int64_t total_size)
1592 {
1593  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1594  uint64_t subtitle_size = 0;
1595  uint64_t data_size = 0;
1596  float percent = -1.0;
1597  int i, j;
1598  int pass1_used = 1;
1599 
1600  for (i = 0; i < nb_output_streams; i++) {
1602  switch (ost->enc_ctx->codec_type) {
1603  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1604  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1605  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1606  default: other_size += ost->data_size; break;
1607  }
1608  extra_size += ost->enc_ctx->extradata_size;
1609  data_size += ost->data_size;
1612  pass1_used = 0;
1613  }
1614 
1615  if (data_size && total_size>0 && total_size >= data_size)
1616  percent = 100.0 * (total_size - data_size) / data_size;
1617 
1618  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1619  video_size / 1024.0,
1620  audio_size / 1024.0,
1621  subtitle_size / 1024.0,
1622  other_size / 1024.0,
1623  extra_size / 1024.0);
1624  if (percent >= 0.0)
1625  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1626  else
1627  av_log(NULL, AV_LOG_INFO, "unknown");
1628  av_log(NULL, AV_LOG_INFO, "\n");
1629 
1630  /* print verbose per-stream stats */
1631  for (i = 0; i < nb_input_files; i++) {
1632  InputFile *f = input_files[i];
1633  uint64_t total_packets = 0, total_size = 0;
1634 
1635  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1636  i, f->ctx->url);
1637 
1638  for (j = 0; j < f->nb_streams; j++) {
1639  InputStream *ist = input_streams[f->ist_index + j];
1640  enum AVMediaType type = ist->dec_ctx->codec_type;
1641 
1642  total_size += ist->data_size;
1643  total_packets += ist->nb_packets;
1644 
1645  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1646  i, j, media_type_string(type));
1647  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1648  ist->nb_packets, ist->data_size);
1649 
1650  if (ist->decoding_needed) {
1651  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1652  ist->frames_decoded);
1653  if (type == AVMEDIA_TYPE_AUDIO)
1654  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1655  av_log(NULL, AV_LOG_VERBOSE, "; ");
1656  }
1657 
1658  av_log(NULL, AV_LOG_VERBOSE, "\n");
1659  }
1660 
1661  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1662  total_packets, total_size);
1663  }
1664 
1665  for (i = 0; i < nb_output_files; i++) {
1666  OutputFile *of = output_files[i];
1667  uint64_t total_packets = 0, total_size = 0;
1668 
1669  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1670  i, of->ctx->url);
1671 
1672  for (j = 0; j < of->ctx->nb_streams; j++) {
1675 
1676  total_size += ost->data_size;
1677  total_packets += ost->packets_written;
1678 
1679  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1680  i, j, media_type_string(type));
1681  if (ost->encoding_needed) {
1682  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1683  ost->frames_encoded);
1684  if (type == AVMEDIA_TYPE_AUDIO)
1685  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1686  av_log(NULL, AV_LOG_VERBOSE, "; ");
1687  }
1688 
1689  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1691 
1692  av_log(NULL, AV_LOG_VERBOSE, "\n");
1693  }
1694 
1695  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1696  total_packets, total_size);
1697  }
1698  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1699  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1700  if (pass1_used) {
1701  av_log(NULL, AV_LOG_WARNING, "\n");
1702  } else {
1703  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1704  }
1705  }
1706 }
1707 
1708 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1709 {
1710  AVBPrint buf, buf_script;
1711  OutputStream *ost;
1712  AVFormatContext *oc;
1713  int64_t total_size;
1714  AVCodecContext *enc;
1715  int frame_number, vid, i;
1716  double bitrate;
1717  double speed;
1718  int64_t pts = INT64_MIN + 1;
1719  static int64_t last_time = -1;
1720  static int first_report = 1;
1721  static int qp_histogram[52];
1722  int hours, mins, secs, us;
1723  const char *hours_sign;
1724  int ret;
1725  float t;
1726 
1727  if (!print_stats && !is_last_report && !progress_avio)
1728  return;
1729 
1730  if (!is_last_report) {
1731  if (last_time == -1) {
1732  last_time = cur_time;
1733  }
1734  if (((cur_time - last_time) < stats_period && !first_report) ||
1735  (first_report && nb_output_dumped < nb_output_files))
1736  return;
1737  last_time = cur_time;
1738  }
1739 
1740  t = (cur_time-timer_start) / 1000000.0;
1741 
1742 
1743  oc = output_files[0]->ctx;
1744 
1745  total_size = avio_size(oc->pb);
1746  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1747  total_size = avio_tell(oc->pb);
1748 
1749  vid = 0;
1751  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1752  for (i = 0; i < nb_output_streams; i++) {
1753  float q = -1;
1754  ost = output_streams[i];
1755  enc = ost->enc_ctx;
1756  if (!ost->stream_copy)
1757  q = ost->quality / (float) FF_QP2LAMBDA;
1758 
1759  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1760  av_bprintf(&buf, "q=%2.1f ", q);
1761  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1762  ost->file_index, ost->index, q);
1763  }
1764  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1765  float fps;
1766 
1767  frame_number = ost->frame_number;
1768  fps = t > 1 ? frame_number / t : 0;
1769  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1770  frame_number, fps < 9.95, fps, q);
1771  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1772  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1773  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1774  ost->file_index, ost->index, q);
1775  if (is_last_report)
1776  av_bprintf(&buf, "L");
1777  if (qp_hist) {
1778  int j;
1779  int qp = lrintf(q);
1780  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1781  qp_histogram[qp]++;
1782  for (j = 0; j < 32; j++)
1783  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1784  }
1785 
1786  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1787  int j;
1788  double error, error_sum = 0;
1789  double scale, scale_sum = 0;
1790  double p;
1791  char type[3] = { 'Y','U','V' };
1792  av_bprintf(&buf, "PSNR=");
1793  for (j = 0; j < 3; j++) {
1794  if (is_last_report) {
1795  error = enc->error[j];
1796  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1797  } else {
1798  error = ost->error[j];
1799  scale = enc->width * enc->height * 255.0 * 255.0;
1800  }
1801  if (j)
1802  scale /= 4;
1803  error_sum += error;
1804  scale_sum += scale;
1805  p = psnr(error / scale);
1806  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1807  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1808  ost->file_index, ost->index, type[j] | 32, p);
1809  }
1810  p = psnr(error_sum / scale_sum);
1811  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1812  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1813  ost->file_index, ost->index, p);
1814  }
1815  vid = 1;
1816  }
1817  /* compute min output value */
1821  if (copy_ts) {
1822  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1826  }
1827  }
1828 
1829  if (is_last_report)
1831  }
1832 
1833  secs = FFABS(pts) / AV_TIME_BASE;
1834  us = FFABS(pts) % AV_TIME_BASE;
1835  mins = secs / 60;
1836  secs %= 60;
1837  hours = mins / 60;
1838  mins %= 60;
1839  hours_sign = (pts < 0) ? "-" : "";
1840 
1841  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1842  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1843 
1844  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1845  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1846  if (pts == AV_NOPTS_VALUE) {
1847  av_bprintf(&buf, "N/A ");
1848  } else {
1849  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1850  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1851  }
1852 
1853  if (bitrate < 0) {
1854  av_bprintf(&buf, "bitrate=N/A");
1855  av_bprintf(&buf_script, "bitrate=N/A\n");
1856  }else{
1857  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1858  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1859  }
1860 
1861  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1862  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1863  if (pts == AV_NOPTS_VALUE) {
1864  av_bprintf(&buf_script, "out_time_us=N/A\n");
1865  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1866  av_bprintf(&buf_script, "out_time=N/A\n");
1867  } else {
1868  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1869  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1870  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1871  hours_sign, hours, mins, secs, us);
1872  }
1873 
1875  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1876  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1877  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1878 
1879  if (speed < 0) {
1880  av_bprintf(&buf, " speed=N/A");
1881  av_bprintf(&buf_script, "speed=N/A\n");
1882  } else {
1883  av_bprintf(&buf, " speed=%4.3gx", speed);
1884  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1885  }
1886 
1887  if (print_stats || is_last_report) {
1888  const char end = is_last_report ? '\n' : '\r';
1889  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1890  fprintf(stderr, "%s %c", buf.str, end);
1891  } else
1892  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1893 
1894  fflush(stderr);
1895  }
1896  av_bprint_finalize(&buf, NULL);
1897 
1898  if (progress_avio) {
1899  av_bprintf(&buf_script, "progress=%s\n",
1900  is_last_report ? "end" : "continue");
1901  avio_write(progress_avio, buf_script.str,
1902  FFMIN(buf_script.len, buf_script.size - 1));
1904  av_bprint_finalize(&buf_script, NULL);
1905  if (is_last_report) {
1906  if ((ret = avio_closep(&progress_avio)) < 0)
1908  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1909  }
1910  }
1911 
1912  first_report = 0;
1913 
1914  if (is_last_report)
1915  print_final_stats(total_size);
1916 }
1917 
1918 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1919 {
1920  // We never got any input. Set a fake format, which will
1921  // come from libavformat.
1922  ifilter->format = par->format;
1923  ifilter->sample_rate = par->sample_rate;
1924  ifilter->channels = par->channels;
1925  ifilter->channel_layout = par->channel_layout;
1926  ifilter->width = par->width;
1927  ifilter->height = par->height;
1928  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1929 }
1930 
1931 static void flush_encoders(void)
1932 {
1933  int i, ret;
1934 
1935  for (i = 0; i < nb_output_streams; i++) {
1937  AVCodecContext *enc = ost->enc_ctx;
1939 
1940  if (!ost->encoding_needed)
1941  continue;
1942 
1943  // Try to enable encoding with no input frames.
1944  // Maybe we should just let encoding fail instead.
1945  if (!ost->initialized) {
1946  FilterGraph *fg = ost->filter->graph;
1947 
1949  "Finishing stream %d:%d without any data written to it.\n",
1950  ost->file_index, ost->st->index);
1951 
1952  if (ost->filter && !fg->graph) {
1953  int x;
1954  for (x = 0; x < fg->nb_inputs; x++) {
1955  InputFilter *ifilter = fg->inputs[x];
1956  if (ifilter->format < 0)
1957  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1958  }
1959 
1961  continue;
1962 
1963  ret = configure_filtergraph(fg);
1964  if (ret < 0) {
1965  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1966  exit_program(1);
1967  }
1968 
1970  }
1971 
1973  }
1974 
1976  continue;
1977 
1978  for (;;) {
1979  const char *desc = NULL;
1980  AVPacket *pkt = ost->pkt;
1981  int pkt_size;
1982 
1983  switch (enc->codec_type) {
1984  case AVMEDIA_TYPE_AUDIO:
1985  desc = "audio";
1986  break;
1987  case AVMEDIA_TYPE_VIDEO:
1988  desc = "video";
1989  break;
1990  default:
1991  av_assert0(0);
1992  }
1993 
1995 
1997  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1998  ret = avcodec_send_frame(enc, NULL);
1999  if (ret < 0) {
2000  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2001  desc,
2002  av_err2str(ret));
2003  exit_program(1);
2004  }
2005  }
2006 
2007  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2008  if (ret < 0 && ret != AVERROR_EOF) {
2009  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2010  desc,
2011  av_err2str(ret));
2012  exit_program(1);
2013  }
2014  if (ost->logfile && enc->stats_out) {
2015  fprintf(ost->logfile, "%s", enc->stats_out);
2016  }
2017  if (ret == AVERROR_EOF) {
2018  output_packet(of, pkt, ost, 1);
2019  break;
2020  }
2021  if (ost->finished & MUXER_FINISHED) {
2023  continue;
2024  }
2026  pkt_size = pkt->size;
2027  output_packet(of, pkt, ost, 0);
2029  do_video_stats(ost, pkt_size);
2030  }
2031  }
2032  }
2033 }
2034 
2035 /*
2036  * Check whether a packet from ist should be written into ost at this time
2037  */
2038 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2039 {
2041  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2042 
2043  if (ost->source_index != ist_index)
2044  return 0;
2045 
2046  if (ost->finished)
2047  return 0;
2048 
2049  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2050  return 0;
2051 
2052  return 1;
2053 }
2054 
2055 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2056 {
2058  InputFile *f = input_files [ist->file_index];
2059  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2060  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2061  AVPacket *opkt = ost->pkt;
2062 
2063  av_packet_unref(opkt);
2064  // EOF: flush output bitstream filters.
2065  if (!pkt) {
2066  output_packet(of, opkt, ost, 1);
2067  return;
2068  }
2069 
2070  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2072  return;
2073 
2074  if (!ost->frame_number && !ost->copy_prior_start) {
2075  int64_t comp_start = start_time;
2076  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2077  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2078  if (pkt->pts == AV_NOPTS_VALUE ?
2079  ist->pts < comp_start :
2080  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2081  return;
2082  }
2083 
2084  if (of->recording_time != INT64_MAX &&
2085  ist->pts >= of->recording_time + start_time) {
2087  return;
2088  }
2089 
2090  if (f->recording_time != INT64_MAX) {
2091  start_time = 0;
2092  if (copy_ts) {
2093  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2094  start_time += start_at_zero ? 0 : f->ctx->start_time;
2095  }
2096  if (ist->pts >= f->recording_time + start_time) {
2097  close_output_stream(ost);
2098  return;
2099  }
2100  }
2101 
2102  /* force the input stream PTS */
2104  ost->sync_opts++;
2105 
2106  if (av_packet_ref(opkt, pkt) < 0)
2107  exit_program(1);
2108 
2109  if (pkt->pts != AV_NOPTS_VALUE)
2110  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2111 
2112  if (pkt->dts == AV_NOPTS_VALUE) {
2113  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2114  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2115  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2116  if(!duration)
2117  duration = ist->dec_ctx->frame_size;
2118  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2119  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2120  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2121  /* dts will be set immediately afterwards to what pts is now */
2122  opkt->pts = opkt->dts - ost_tb_start_time;
2123  } else
2124  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2125  opkt->dts -= ost_tb_start_time;
2126 
2127  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2128 
2129  output_packet(of, opkt, ost, 0);
2130 }
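/*
 * Illustrative sketch (not part of ffmpeg.c): the timestamp handling in
 * do_streamcopy() above amounts to moving pts/dts from the input stream
 * time base into the muxer time base with av_rescale_q() and subtracting
 * the output start-time offset. rescale_to_mux_tb() is a hypothetical
 * helper written only to make that arithmetic explicit.
 */
static int64_t rescale_to_mux_tb(int64_t ts, AVRational src_tb,
                                 AVRational mux_tb, int64_t start_time)
{
    if (ts == AV_NOPTS_VALUE)
        return AV_NOPTS_VALUE;
    /* start_time is assumed to be in AV_TIME_BASE_Q, as of->start_time is */
    return av_rescale_q(ts, src_tb, mux_tb)
         - av_rescale_q(start_time, AV_TIME_BASE_Q, mux_tb);
}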
2131 
2132 int guess_input_channel_layout(InputStream *ist)
2133 {
2134  AVCodecContext *dec = ist->dec_ctx;
2135 
2136  if (!dec->channel_layout) {
2137  char layout_name[256];
2138 
2139  if (dec->channels > ist->guess_layout_max)
2140  return 0;
2141  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2142  if (!dec->channel_layout)
2143  return 0;
2144  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2145  dec->channels, dec->channel_layout);
2146  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2147  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2148  }
2149  return 1;
2150 }
2151 
2152 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2153 {
2154  if (*got_output || ret<0)
2155  decode_error_stat[ret<0] ++;
2156 
2157  if (ret < 0 && exit_on_error)
2158  exit_program(1);
2159 
2160  if (*got_output && ist) {
2161  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2162  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2163  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2164  if (exit_on_error)
2165  exit_program(1);
2166  }
2167  }
2168 }
2169 
2170 // Filters can be configured only if the formats of all inputs are known.
2171 static int ifilter_has_all_input_formats(FilterGraph *fg)
2172 {
2173  int i;
2174  for (i = 0; i < fg->nb_inputs; i++) {
2175  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2176  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2177  return 0;
2178  }
2179  return 1;
2180 }
2181 
2182 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2183 {
2184  FilterGraph *fg = ifilter->graph;
2185  int need_reinit, ret, i;
2186 
2187  /* determine if the parameters for this input changed */
2188  need_reinit = ifilter->format != frame->format;
2189 
2190  switch (ifilter->ist->st->codecpar->codec_type) {
2191  case AVMEDIA_TYPE_AUDIO:
2192  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2193  ifilter->channels != frame->channels ||
2194  ifilter->channel_layout != frame->channel_layout;
2195  break;
2196  case AVMEDIA_TYPE_VIDEO:
2197  need_reinit |= ifilter->width != frame->width ||
2198  ifilter->height != frame->height;
2199  break;
2200  }
2201 
2202  if (!ifilter->ist->reinit_filters && fg->graph)
2203  need_reinit = 0;
2204 
2205  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2206  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2207  need_reinit = 1;
2208 
2209  if (need_reinit) {
2210  ret = ifilter_parameters_from_frame(ifilter, frame);
2211  if (ret < 0)
2212  return ret;
2213  }
2214 
2215  /* (re)init the graph if possible, otherwise buffer the frame and return */
2216  if (need_reinit || !fg->graph) {
2217  for (i = 0; i < fg->nb_inputs; i++) {
2218  if (!ifilter_has_all_input_formats(fg)) {
2219  AVFrame *tmp = av_frame_clone(frame);
2220  if (!tmp)
2221  return AVERROR(ENOMEM);
2222  av_frame_unref(frame);
2223 
2224  if (!av_fifo_space(ifilter->frame_queue)) {
2225  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2226  if (ret < 0) {
2227  av_frame_free(&tmp);
2228  return ret;
2229  }
2230  }
2231  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2232  return 0;
2233  }
2234  }
2235 
2236  ret = reap_filters(1);
2237  if (ret < 0 && ret != AVERROR_EOF) {
2238  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2239  return ret;
2240  }
2241 
2242  ret = configure_filtergraph(fg);
2243  if (ret < 0) {
2244  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2245  return ret;
2246  }
2247  }
2248 
2249  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2250  if (ret < 0) {
2251  if (ret != AVERROR_EOF)
2252  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2253  return ret;
2254  }
2255 
2256  return 0;
2257 }
2258 
2259 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2260 {
2261  int ret;
2262 
2263  ifilter->eof = 1;
2264 
2265  if (ifilter->filter) {
2266  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2267  if (ret < 0)
2268  return ret;
2269  } else {
2270  // the filtergraph was never configured
2271  if (ifilter->format < 0)
2272  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2273  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2274  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2275  return AVERROR_INVALIDDATA;
2276  }
2277  }
2278 
2279  return 0;
2280 }
2281 
2282 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2283 // There is the following difference: if you got a frame, you must call
2284 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2285 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2286 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2287 {
2288  int ret;
2289 
2290  *got_frame = 0;
2291 
2292  if (pkt) {
2293  ret = avcodec_send_packet(avctx, pkt);
2294  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2295  // decoded frames with avcodec_receive_frame() until done.
2296  if (ret < 0 && ret != AVERROR_EOF)
2297  return ret;
2298  }
2299 
2300  ret = avcodec_receive_frame(avctx, frame);
2301  if (ret < 0 && ret != AVERROR(EAGAIN))
2302  return ret;
2303  if (ret >= 0)
2304  *got_frame = 1;
2305 
2306  return 0;
2307 }
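/*
 * Illustrative sketch (not part of ffmpeg.c): the plain send/receive loop
 * that decode() above folds into a single call. One packet is submitted,
 * then every frame the decoder has ready is drained; sending NULL switches
 * the decoder into draining mode. handle_frame() is a hypothetical callback.
 */
static int decode_and_drain(AVCodecContext *avctx, const AVPacket *pkt,
                            AVFrame *frame, void (*handle_frame)(AVFrame *))
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0 && ret != AVERROR_EOF)
        return ret;
    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        handle_frame(frame);
        av_frame_unref(frame);
    }
    /* EAGAIN means "feed more input", EOF means the decoder is fully drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}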
2308 
2309 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2310 {
2311  int i, ret;
2312  AVFrame *f;
2313 
2314  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2315  for (i = 0; i < ist->nb_filters; i++) {
2316  if (i < ist->nb_filters - 1) {
2317  f = ist->filter_frame;
2318  ret = av_frame_ref(f, decoded_frame);
2319  if (ret < 0)
2320  break;
2321  } else
2322  f = decoded_frame;
2323  ret = ifilter_send_frame(ist->filters[i], f);
2324  if (ret == AVERROR_EOF)
2325  ret = 0; /* ignore */
2326  if (ret < 0) {
2327  av_log(NULL, AV_LOG_ERROR,
2328  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2329  break;
2330  }
2331  }
2332  return ret;
2333 }
2334 
2335 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2336  int *decode_failed)
2337 {
2338  AVFrame *decoded_frame;
2339  AVCodecContext *avctx = ist->dec_ctx;
2340  int ret, err = 0;
2341  AVRational decoded_frame_tb;
2342 
2343  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2344  return AVERROR(ENOMEM);
2345  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2346  return AVERROR(ENOMEM);
2347  decoded_frame = ist->decoded_frame;
2348 
2349  update_benchmark(NULL);
2350  ret = decode(avctx, decoded_frame, got_output, pkt);
2351  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2352  if (ret < 0)
2353  *decode_failed = 1;
2354 
2355  if (ret >= 0 && avctx->sample_rate <= 0) {
2356  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2357  ret = AVERROR_INVALIDDATA;
2358  }
2359 
2360  if (ret != AVERROR_EOF)
2361  check_decode_result(ist, got_output, ret);
2362 
2363  if (!*got_output || ret < 0)
2364  return ret;
2365 
2366  ist->samples_decoded += decoded_frame->nb_samples;
2367  ist->frames_decoded++;
2368 
2369  /* increment next_dts to use for the case where the input stream does not
2370  have timestamps or there are multiple frames in the packet */
2371  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2372  avctx->sample_rate;
2373  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2374  avctx->sample_rate;
2375 
2376  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2377  decoded_frame_tb = ist->st->time_base;
2378  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2379  decoded_frame->pts = pkt->pts;
2380  decoded_frame_tb = ist->st->time_base;
2381  }else {
2382  decoded_frame->pts = ist->dts;
2383  decoded_frame_tb = AV_TIME_BASE_Q;
2384  }
2385  if (decoded_frame->pts != AV_NOPTS_VALUE)
2386  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2387  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2388  (AVRational){1, avctx->sample_rate});
2389  ist->nb_samples = decoded_frame->nb_samples;
2390  err = send_frame_to_filters(ist, decoded_frame);
2391 
2392  av_frame_unref(ist->filter_frame);
2393  av_frame_unref(decoded_frame);
2394  return err < 0 ? err : ret;
2395 }
2396 
2397 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2398  int *decode_failed)
2399 {
2400  AVFrame *decoded_frame;
2401  int i, ret = 0, err = 0;
2402  int64_t best_effort_timestamp;
2403  int64_t dts = AV_NOPTS_VALUE;
2404 
2405  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2406  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2407  // skip the packet.
2408  if (!eof && pkt && pkt->size == 0)
2409  return 0;
2410 
2411  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2412  return AVERROR(ENOMEM);
2413  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2414  return AVERROR(ENOMEM);
2415  decoded_frame = ist->decoded_frame;
2416  if (ist->dts != AV_NOPTS_VALUE)
2417  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2418  if (pkt) {
2419  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2420  }
2421 
2422  // The old code used to set dts on the drain packet, which does not work
2423  // with the new API anymore.
2424  if (eof) {
2425  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2426  if (!new)
2427  return AVERROR(ENOMEM);
2428  ist->dts_buffer = new;
2429  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2430  }
2431 
2432  update_benchmark(NULL);
2433  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2434  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2435  if (ret < 0)
2436  *decode_failed = 1;
2437 
2438  // The following line may be required in some cases where there is no parser
2439  // or the parser does not set has_b_frames correctly
2440  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2441  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2442  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2443  } else
2444  av_log(ist->dec_ctx, AV_LOG_WARNING,
2445  "video_delay is larger in decoder than demuxer %d > %d.\n"
2446  "If you want to help, upload a sample "
2447  "of this file to https://streams.videolan.org/upload/ "
2448  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2449  ist->dec_ctx->has_b_frames,
2450  ist->st->codecpar->video_delay);
2451  }
2452 
2453  if (ret != AVERROR_EOF)
2454  check_decode_result(ist, got_output, ret);
2455 
2456  if (*got_output && ret >= 0) {
2457  if (ist->dec_ctx->width != decoded_frame->width ||
2458  ist->dec_ctx->height != decoded_frame->height ||
2459  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2460  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2461  decoded_frame->width,
2462  decoded_frame->height,
2463  decoded_frame->format,
2464  ist->dec_ctx->width,
2465  ist->dec_ctx->height,
2466  ist->dec_ctx->pix_fmt);
2467  }
2468  }
2469 
2470  if (!*got_output || ret < 0)
2471  return ret;
2472 
2473  if(ist->top_field_first>=0)
2474  decoded_frame->top_field_first = ist->top_field_first;
2475 
2476  ist->frames_decoded++;
2477 
2478  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2479  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2480  if (err < 0)
2481  goto fail;
2482  }
2483  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2484 
2485  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2486  *duration_pts = decoded_frame->pkt_duration;
2487 
2488  if (ist->framerate.num)
2489  best_effort_timestamp = ist->cfr_next_pts++;
2490 
2491  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2492  best_effort_timestamp = ist->dts_buffer[0];
2493 
2494  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2495  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2496  ist->nb_dts_buffer--;
2497  }
2498 
2499  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2500  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2501 
2502  if (ts != AV_NOPTS_VALUE)
2503  ist->next_pts = ist->pts = ts;
2504  }
2505 
2506  if (debug_ts) {
2507  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2508  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2509  ist->st->index, av_ts2str(decoded_frame->pts),
2510  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2511  best_effort_timestamp,
2512  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2513  decoded_frame->key_frame, decoded_frame->pict_type,
2514  ist->st->time_base.num, ist->st->time_base.den);
2515  }
2516 
2517  if (ist->st->sample_aspect_ratio.num)
2518  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2519 
2520  err = send_frame_to_filters(ist, decoded_frame);
2521 
2522 fail:
2523  av_frame_unref(ist->filter_frame);
2524  av_frame_unref(decoded_frame);
2525  return err < 0 ? err : ret;
2526 }
2527 
2528 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2529  int *decode_failed)
2530 {
2531  AVSubtitle subtitle;
2532  int free_sub = 1;
2533  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2534  &subtitle, got_output, pkt);
2535 
2536  check_decode_result(NULL, got_output, ret);
2537 
2538  if (ret < 0 || !*got_output) {
2539  *decode_failed = 1;
2540  if (!pkt->size)
2541  sub2video_flush(ist);
2542  return ret;
2543  }
2544 
2545  if (ist->fix_sub_duration) {
2546  int end = 1;
2547  if (ist->prev_sub.got_output) {
2548  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2549  1000, AV_TIME_BASE);
2550  if (end < ist->prev_sub.subtitle.end_display_time) {
2551  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2552  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2553  ist->prev_sub.subtitle.end_display_time, end,
2554  end <= 0 ? ", dropping it" : "");
2555  ist->prev_sub.subtitle.end_display_time = end;
2556  }
2557  }
2558  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2559  FFSWAP(int, ret, ist->prev_sub.ret);
2560  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2561  if (end <= 0)
2562  goto out;
2563  }
2564 
2565  if (!*got_output)
2566  return ret;
2567 
2568  if (ist->sub2video.frame) {
2569  sub2video_update(ist, INT64_MIN, &subtitle);
2570  } else if (ist->nb_filters) {
2571  if (!ist->sub2video.sub_queue)
2572  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2573  if (!ist->sub2video.sub_queue)
2574  exit_program(1);
2575  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2576  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2577  if (ret < 0)
2578  exit_program(1);
2579  }
2580  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2581  free_sub = 0;
2582  }
2583 
2584  if (!subtitle.num_rects)
2585  goto out;
2586 
2587  ist->frames_decoded++;
2588 
2589  for (i = 0; i < nb_output_streams; i++) {
2590  OutputStream *ost = output_streams[i];
2591 
2592  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2593  exit_program(1);
2594  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2595  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2596  continue;
2597 
2598  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2599  }
2600 
2601 out:
2602  if (free_sub)
2603  avsubtitle_free(&subtitle);
2604  return ret;
2605 }
2606 
2607 static int send_filter_eof(InputStream *ist)
2608 {
2609  int i, ret;
2610  /* TODO keep pts also in stream time base to avoid converting back */
2611  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2612  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2613 
2614  for (i = 0; i < ist->nb_filters; i++) {
2615  ret = ifilter_send_eof(ist->filters[i], pts);
2616  if (ret < 0)
2617  return ret;
2618  }
2619  return 0;
2620 }
2621 
2622 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2623 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2624 {
2625  int ret = 0, i;
2626  int repeating = 0;
2627  int eof_reached = 0;
2628 
2629  AVPacket *avpkt;
2630 
2631  if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2632  return AVERROR(ENOMEM);
2633  avpkt = ist->pkt;
2634 
2635  if (!ist->saw_first_ts) {
2636  ist->first_dts =
2637  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2638  ist->pts = 0;
2639  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2640  ist->first_dts =
2641  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2642  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2643  }
2644  ist->saw_first_ts = 1;
2645  }
2646 
2647  if (ist->next_dts == AV_NOPTS_VALUE)
2648  ist->next_dts = ist->dts;
2649  if (ist->next_pts == AV_NOPTS_VALUE)
2650  ist->next_pts = ist->pts;
2651 
2652  if (pkt) {
2653  av_packet_unref(avpkt);
2654  ret = av_packet_ref(avpkt, pkt);
2655  if (ret < 0)
2656  return ret;
2657  }
2658 
2659  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2660  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2661  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2662  ist->next_pts = ist->pts = ist->dts;
2663  }
2664 
2665  // while we have more to decode or while the decoder did output something on EOF
2666  while (ist->decoding_needed) {
2667  int64_t duration_dts = 0;
2668  int64_t duration_pts = 0;
2669  int got_output = 0;
2670  int decode_failed = 0;
2671 
2672  ist->pts = ist->next_pts;
2673  ist->dts = ist->next_dts;
2674 
2675  switch (ist->dec_ctx->codec_type) {
2676  case AVMEDIA_TYPE_AUDIO:
2677  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2678  &decode_failed);
2679  av_packet_unref(avpkt);
2680  break;
2681  case AVMEDIA_TYPE_VIDEO:
2682  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2683  &decode_failed);
2684  if (!repeating || !pkt || got_output) {
2685  if (pkt && pkt->duration) {
2686  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2687  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2688  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2689  duration_dts = ((int64_t)AV_TIME_BASE *
2690  ist->dec_ctx->framerate.den * ticks) /
2691  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2692  }
2693 
2694  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2695  ist->next_dts += duration_dts;
2696  }else
2697  ist->next_dts = AV_NOPTS_VALUE;
2698  }
2699 
2700  if (got_output) {
2701  if (duration_pts > 0) {
2702  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2703  } else {
2704  ist->next_pts += duration_dts;
2705  }
2706  }
2707  av_packet_unref(avpkt);
2708  break;
2709  case AVMEDIA_TYPE_SUBTITLE:
2710  if (repeating)
2711  break;
2712  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2713  if (!pkt && ret >= 0)
2714  ret = AVERROR_EOF;
2715  av_packet_unref(avpkt);
2716  break;
2717  default:
2718  return -1;
2719  }
2720 
2721  if (ret == AVERROR_EOF) {
2722  eof_reached = 1;
2723  break;
2724  }
2725 
2726  if (ret < 0) {
2727  if (decode_failed) {
2728  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2729  ist->file_index, ist->st->index, av_err2str(ret));
2730  } else {
2731  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2732  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2733  }
2734  if (!decode_failed || exit_on_error)
2735  exit_program(1);
2736  break;
2737  }
2738 
2739  if (got_output)
2740  ist->got_output = 1;
2741 
2742  if (!got_output)
2743  break;
2744 
2745  // During draining, we might get multiple output frames in this loop.
2746  // ffmpeg.c does not drain the filter chain on configuration changes,
2747  // which means if we send multiple frames at once to the filters, and
2748  // one of those frames changes configuration, the buffered frames will
2749  // be lost. This can upset certain FATE tests.
2750  // Decode only 1 frame per call on EOF to appease these FATE tests.
2751  // The ideal solution would be to rewrite decoding to use the new
2752  // decoding API in a better way.
2753  if (!pkt)
2754  break;
2755 
2756  repeating = 1;
2757  }
2758 
2759  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2760  /* except when looping we need to flush but not to send an EOF */
2761  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2762  int ret = send_filter_eof(ist);
2763  if (ret < 0) {
2764  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2765  exit_program(1);
2766  }
2767  }
2768 
2769  /* handle stream copy */
2770  if (!ist->decoding_needed && pkt) {
2771  ist->dts = ist->next_dts;
2772  switch (ist->dec_ctx->codec_type) {
2773  case AVMEDIA_TYPE_AUDIO:
2774  av_assert1(pkt->duration >= 0);
2775  if (ist->dec_ctx->sample_rate) {
2776  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2777  ist->dec_ctx->sample_rate;
2778  } else {
2779  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2780  }
2781  break;
2782  case AVMEDIA_TYPE_VIDEO:
2783  if (ist->framerate.num) {
2784  // TODO: Remove work-around for c99-to-c89 issue 7
2785  AVRational time_base_q = AV_TIME_BASE_Q;
2786  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2787  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2788  } else if (pkt->duration) {
2789  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2790  } else if(ist->dec_ctx->framerate.num != 0) {
2791  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2792  ist->next_dts += ((int64_t)AV_TIME_BASE *
2793  ist->dec_ctx->framerate.den * ticks) /
2794  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2795  }
2796  break;
2797  }
2798  ist->pts = ist->dts;
2799  ist->next_pts = ist->next_dts;
2800  }
2801  for (i = 0; i < nb_output_streams; i++) {
2802  OutputStream *ost = output_streams[i];
2803 
2804  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2805  exit_program(1);
2806  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2807  continue;
2808 
2809  do_streamcopy(ist, ost, pkt);
2810  }
2811 
2812  return !eof_reached;
2813 }
2814 
2815 static void print_sdp(void)
2816 {
2817  char sdp[16384];
2818  int i;
2819  int j;
2820  AVIOContext *sdp_pb;
2821  AVFormatContext **avc;
2822 
2823  for (i = 0; i < nb_output_files; i++) {
2824  if (!output_files[i]->header_written)
2825  return;
2826  }
2827 
2828  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2829  if (!avc)
2830  exit_program(1);
2831  for (i = 0, j = 0; i < nb_output_files; i++) {
2832  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2833  avc[j] = output_files[i]->ctx;
2834  j++;
2835  }
2836  }
2837 
2838  if (!j)
2839  goto fail;
2840 
2841  av_sdp_create(avc, j, sdp, sizeof(sdp));
2842 
2843  if (!sdp_filename) {
2844  printf("SDP:\n%s\n", sdp);
2845  fflush(stdout);
2846  } else {
2847  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2848  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2849  } else {
2850  avio_print(sdp_pb, sdp);
2851  avio_closep(&sdp_pb);
2852  av_freep(&sdp_filename);
2853  }
2854  }
2855 
2856 fail:
2857  av_freep(&avc);
2858 }
2859 
2860 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2861 {
2862  InputStream *ist = s->opaque;
2863  const enum AVPixelFormat *p;
2864  int ret;
2865 
2866  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2867  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2868  const AVCodecHWConfig *config = NULL;
2869  int i;
2870 
2871  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2872  break;
2873 
2874  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2875  ist->hwaccel_id == HWACCEL_AUTO) {
2876  for (i = 0;; i++) {
2877  config = avcodec_get_hw_config(s->codec, i);
2878  if (!config)
2879  break;
2880  if (!(config->methods &
2881  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2882  continue;
2883  if (config->pix_fmt == *p)
2884  break;
2885  }
2886  }
2887  if (config) {
2888  if (config->device_type != ist->hwaccel_device_type) {
2889  // Different hwaccel offered, ignore.
2890  continue;
2891  }
2892 
2893  ret = hwaccel_decode_init(s);
2894  if (ret < 0) {
2895  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2896  av_log(NULL, AV_LOG_FATAL,
2897  "%s hwaccel requested for input stream #%d:%d, "
2898  "but cannot be initialized.\n",
2899  av_hwdevice_get_type_name(config->device_type),
2900  ist->file_index, ist->st->index);
2901  return AV_PIX_FMT_NONE;
2902  }
2903  continue;
2904  }
2905  } else {
2906  const HWAccel *hwaccel = NULL;
2907  int i;
2908  for (i = 0; hwaccels[i].name; i++) {
2909  if (hwaccels[i].pix_fmt == *p) {
2910  hwaccel = &hwaccels[i];
2911  break;
2912  }
2913  }
2914  if (!hwaccel) {
2915  // No hwaccel supporting this pixfmt.
2916  continue;
2917  }
2918  if (hwaccel->id != ist->hwaccel_id) {
2919  // Does not match requested hwaccel.
2920  continue;
2921  }
2922 
2923  ret = hwaccel->init(s);
2924  if (ret < 0) {
2925  av_log(NULL, AV_LOG_FATAL,
2926  "%s hwaccel requested for input stream #%d:%d, "
2927  "but cannot be initialized.\n", hwaccel->name,
2928  ist->file_index, ist->st->index);
2929  return AV_PIX_FMT_NONE;
2930  }
2931  }
2932 
2933  if (ist->hw_frames_ctx) {
2934  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2935  if (!s->hw_frames_ctx)
2936  return AV_PIX_FMT_NONE;
2937  }
2938 
2939  ist->hwaccel_pix_fmt = *p;
2940  break;
2941  }
2942 
2943  return *p;
2944 }
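/*
 * Illustrative sketch (not part of ffmpeg.c; the real device setup lives in
 * ffmpeg_hw.c): how a hardware device context is typically created and
 * attached to a decoder before its get_format() callback runs. The device
 * type would come from the -hwaccel option.
 */
static int attach_hw_device(AVCodecContext *avctx, enum AVHWDeviceType type)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, type, NULL, NULL, 0);
    if (ret < 0)
        return ret;
    /* the codec context takes ownership of this reference */
    avctx->hw_device_ctx = device_ref;
    return 0;
}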
2945 
2946 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2947 {
2948  InputStream *ist = s->opaque;
2949 
2950  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2951  return ist->hwaccel_get_buffer(s, frame, flags);
2952 
2953  return avcodec_default_get_buffer2(s, frame, flags);
2954 }
2955 
2956 static int init_input_stream(int ist_index, char *error, int error_len)
2957 {
2958  int ret;
2959  InputStream *ist = input_streams[ist_index];
2960 
2961  if (ist->decoding_needed) {
2962  const AVCodec *codec = ist->dec;
2963  if (!codec) {
2964  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2965  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2966  return AVERROR(EINVAL);
2967  }
2968 
2969  ist->dec_ctx->opaque = ist;
2970  ist->dec_ctx->get_format = get_format;
2971  ist->dec_ctx->get_buffer2 = get_buffer;
2972 #if LIBAVCODEC_VERSION_MAJOR < 60
2973 FF_DISABLE_DEPRECATION_WARNINGS
2974  ist->dec_ctx->thread_safe_callbacks = 1;
2975 FF_ENABLE_DEPRECATION_WARNINGS
2976 #endif
2977 
2978  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2979  (ist->decoding_needed & DECODING_FOR_OST)) {
2980  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2981  if (ist->decoding_needed & DECODING_FOR_FILTER)
2982  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2983  }
2984 
2985  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2986 
2987  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2988  * audio, and video decoders such as cuvid or mediacodec */
2989  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2990 
2991  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2992  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2993  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2994  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2995  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2996 
2997  ret = hw_device_setup_for_decode(ist);
2998  if (ret < 0) {
2999  snprintf(error, error_len, "Device setup failed for "
3000  "decoder on input stream #%d:%d : %s",
3001  ist->file_index, ist->st->index, av_err2str(ret));
3002  return ret;
3003  }
3004 
3005  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
3006  if (ret == AVERROR_EXPERIMENTAL)
3007  abort_codec_experimental(codec, 0);
3008 
3009  snprintf(error, error_len,
3010  "Error while opening decoder for input stream "
3011  "#%d:%d : %s",
3012  ist->file_index, ist->st->index, av_err2str(ret));
3013  return ret;
3014  }
3015  assert_avoptions(ist->decoder_opts);
3016  }
3017 
3018  ist->next_pts = AV_NOPTS_VALUE;
3019  ist->next_dts = AV_NOPTS_VALUE;
3020 
3021  return 0;
3022 }
3023 
3024 static InputStream *get_input_stream(OutputStream *ost)
3025 {
3026  if (ost->source_index >= 0)
3027  return input_streams[ost->source_index];
3028  return NULL;
3029 }
3030 
3031 static int compare_int64(const void *a, const void *b)
3032 {
3033  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3034 }
3035 
3036 /* open the muxer when all the streams are initialized */
3037 static int check_init_output_file(OutputFile *of, int file_index)
3038 {
3039  int ret, i;
3040 
3041  for (i = 0; i < of->ctx->nb_streams; i++) {
3042  OutputStream *ost = output_streams[of->ost_index + i];
3043  if (!ost->initialized)
3044  return 0;
3045  }
3046 
3047  of->ctx->interrupt_callback = int_cb;
3048 
3049  ret = avformat_write_header(of->ctx, &of->opts);
3050  if (ret < 0) {
3051  av_log(NULL, AV_LOG_ERROR,
3052  "Could not write header for output file #%d "
3053  "(incorrect codec parameters ?): %s\n",
3054  file_index, av_err2str(ret));
3055  return ret;
3056  }
3057  //assert_avoptions(of->opts);
3058  of->header_written = 1;
3059 
3060  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3061  nb_output_dumped++;
3062 
3063  if (sdp_filename || want_sdp)
3064  print_sdp();
3065 
3066  /* flush the muxing queues */
3067  for (i = 0; i < of->ctx->nb_streams; i++) {
3068  OutputStream *ost = output_streams[of->ost_index + i];
3069 
3070  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3071  if (!av_fifo_size(ost->muxing_queue))
3072  ost->mux_timebase = ost->st->time_base;
3073 
3074  while (av_fifo_size(ost->muxing_queue)) {
3075  AVPacket *pkt;
3076  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3077  ost->muxing_queue_data_size -= pkt->size;
3078  write_packet(of, pkt, ost, 1);
3079  av_packet_free(&pkt);
3080  }
3081  }
3082 
3083  return 0;
3084 }
3085 
3086 static int init_output_bsfs(OutputStream *ost)
3087 {
3088  AVBSFContext *ctx = ost->bsf_ctx;
3089  int ret;
3090 
3091  if (!ctx)
3092  return 0;
3093 
3094  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3095  if (ret < 0)
3096  return ret;
3097 
3098  ctx->time_base_in = ost->st->time_base;
3099 
3100  ret = av_bsf_init(ctx);
3101  if (ret < 0) {
3102  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3103  ctx->filter->name);
3104  return ret;
3105  }
3106 
3107  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3108  if (ret < 0)
3109  return ret;
3110  ost->st->time_base = ctx->time_base_out;
3111 
3112  return 0;
3113 }
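/*
 * Illustrative sketch (not part of ffmpeg.c): the same av_bsf_* sequence as
 * init_output_bsfs() above, but for a single filter created by name, e.g.
 * "h264_mp4toannexb". open_single_bsf() is a hypothetical helper.
 */
static int open_single_bsf(const char *name, const AVStream *st, AVBSFContext **out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(name);
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    /* the filter needs the stream parameters and time base as input */
    if ((ret = avcodec_parameters_copy(bsf->par_in, st->codecpar)) < 0) {
        av_bsf_free(&bsf);
        return ret;
    }
    bsf->time_base_in = st->time_base;
    if ((ret = av_bsf_init(bsf)) < 0) {
        av_bsf_free(&bsf);
        return ret;
    }
    *out = bsf;
    return 0;
}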
3114 
3115 static int init_output_stream_streamcopy(OutputStream *ost)
3116 {
3117  OutputFile *of = output_files[ost->file_index];
3118  InputStream *ist = get_input_stream(ost);
3119  AVCodecParameters *par_dst = ost->st->codecpar;
3120  AVCodecParameters *par_src = ost->ref_par;
3121  AVRational sar;
3122  int i, ret;
3123  uint32_t codec_tag = par_dst->codec_tag;
3124 
3125  av_assert0(ist && !ost->filter);
3126 
3127  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3128  if (ret >= 0)
3129  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3130  if (ret < 0) {
3131  av_log(NULL, AV_LOG_FATAL,
3132  "Error setting up codec context options.\n");
3133  return ret;
3134  }
3135 
3136  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3137  if (ret < 0) {
3138  av_log(NULL, AV_LOG_FATAL,
3139  "Error getting reference codec parameters.\n");
3140  return ret;
3141  }
3142 
3143  if (!codec_tag) {
3144  unsigned int codec_tag_tmp;
3145  if (!of->ctx->oformat->codec_tag ||
3146  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3147  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3148  codec_tag = par_src->codec_tag;
3149  }
3150 
3151  ret = avcodec_parameters_copy(par_dst, par_src);
3152  if (ret < 0)
3153  return ret;
3154 
3155  par_dst->codec_tag = codec_tag;
3156 
3157  if (!ost->frame_rate.num)
3158  ost->frame_rate = ist->framerate;
3159 
3160  if (ost->frame_rate.num)
3161  ost->st->avg_frame_rate = ost->frame_rate;
3162  else
3163  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3164 
3165  ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3166  if (ret < 0)
3167  return ret;
3168 
3169  // copy timebase while removing common factors
3170  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3171  if (ost->frame_rate.num)
3172  ost->st->time_base = av_inv_q(ost->frame_rate);
3173  else
3174  ost->st->time_base = ist->st->time_base;
3175  }
3176 
3177  // copy estimated duration as a hint to the muxer
3178  if (ost->st->duration <= 0 && ist->st->duration > 0)
3179  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3180 
3181  // copy disposition
3182  ost->st->disposition = ist->st->disposition;
3183 
3184  if (ist->st->nb_side_data) {
3185  for (i = 0; i < ist->st->nb_side_data; i++) {
3186  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3187  uint8_t *dst_data;
3188 
3189  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3190  if (!dst_data)
3191  return AVERROR(ENOMEM);
3192  memcpy(dst_data, sd_src->data, sd_src->size);
3193  }
3194  }
3195 
3196  if (ost->rotate_overridden) {
3197  uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3198  sizeof(int32_t) * 9);
3199  if (sd)
3200  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3201  }
3202 
3203  switch (par_dst->codec_type) {
3204  case AVMEDIA_TYPE_AUDIO:
3205  if (audio_volume != 256) {
3206  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3207  exit_program(1);
3208  }
3209  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3210  par_dst->block_align= 0;
3211  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3212  par_dst->block_align= 0;
3213  break;
3214  case AVMEDIA_TYPE_VIDEO:
3215  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3216  sar =
3217  av_mul_q(ost->frame_aspect_ratio,
3218  (AVRational){ par_dst->height, par_dst->width });
3219  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3220  "with stream copy may produce invalid files\n");
3221  }
3222  else if (ist->st->sample_aspect_ratio.num)
3223  sar = ist->st->sample_aspect_ratio;
3224  else
3225  sar = par_src->sample_aspect_ratio;
3226  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3227  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3228  ost->st->r_frame_rate = ist->st->r_frame_rate;
3229  break;
3230  }
3231 
3232  ost->mux_timebase = ist->st->time_base;
3233 
3234  return 0;
3235 }
3236 
3237 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3238 {
3239  AVDictionaryEntry *e;
3240 
3241  uint8_t *encoder_string;
3242  int encoder_string_len;
3243  int format_flags = 0;
3244  int codec_flags = ost->enc_ctx->flags;
3245 
3246  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3247  return;
3248 
3249  e = av_dict_get(of->opts, "fflags", NULL, 0);
3250  if (e) {
3251  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3252  if (!o)
3253  return;
3254  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3255  }
3256  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3257  if (e) {
3258  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3259  if (!o)
3260  return;
3261  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3262  }
3263 
3264  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3265  encoder_string = av_mallocz(encoder_string_len);
3266  if (!encoder_string)
3267  exit_program(1);
3268 
3269  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3270  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3271  else
3272  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3273  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3274  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3275  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3276 }
3277 
3278 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3279  AVCodecContext *avctx)
3280 {
3281  char *p;
3282  int n = 1, i, size, index = 0;
3283  int64_t t, *pts;
3284 
3285  for (p = kf; *p; p++)
3286  if (*p == ',')
3287  n++;
3288  size = n;
3289  pts = av_malloc_array(size, sizeof(*pts));
3290  if (!pts) {
3291  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3292  exit_program(1);
3293  }
3294 
3295  p = kf;
3296  for (i = 0; i < n; i++) {
3297  char *next = strchr(p, ',');
3298 
3299  if (next)
3300  *next++ = 0;
3301 
3302  if (!memcmp(p, "chapters", 8)) {
3303 
3304  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3305  int j;
3306 
3307  if (avf->nb_chapters > INT_MAX - size ||
3308  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3309  sizeof(*pts)))) {
3310  av_log(NULL, AV_LOG_FATAL,
3311  "Could not allocate forced key frames array.\n");
3312  exit_program(1);
3313  }
3314  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3315  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3316 
3317  for (j = 0; j < avf->nb_chapters; j++) {
3318  AVChapter *c = avf->chapters[j];
3319  av_assert1(index < size);
3320  pts[index++] = av_rescale_q(c->start, c->time_base,
3321  avctx->time_base) + t;
3322  }
3323 
3324  } else {
3325 
3326  t = parse_time_or_die("force_key_frames", p, 1);
3327  av_assert1(index < size);
3328  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3329 
3330  }
3331 
3332  p = next;
3333  }
3334 
3335  av_assert0(index == size);
3336  qsort(pts, size, sizeof(*pts), compare_int64);
3337  ost->forced_kf_count = size;
3338  ost->forced_kf_pts = pts;
3339 }
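/*
 * Illustrative note (not part of ffmpeg.c): examples of the -force_key_frames
 * syntax parsed above. Each comma-separated item is either a timestamp or
 * "chapters" with an optional time delta, e.g.
 *
 *   ffmpeg -i in.mkv -force_key_frames 0:00:05,0:00:10 ... out.mkv
 *   ffmpeg -i in.mkv -force_key_frames chapters-0.1    ... out.mkv
 *
 * The second form forces a keyframe 0.1 seconds before every chapter mark.
 */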
3340 
3341 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3342 {
3344  AVCodecContext *enc_ctx = ost->enc_ctx;
3345  AVFormatContext *oc;
3346 
3347  if (ost->enc_timebase.num > 0) {
3348  enc_ctx->time_base = ost->enc_timebase;
3349  return;
3350  }
3351 
3352  if (ost->enc_timebase.num < 0) {
3353  if (ist) {
3354  enc_ctx->time_base = ist->st->time_base;
3355  return;
3356  }
3357 
3358  oc = output_files[ost->file_index]->ctx;
3359  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3360  }
3361 
3362  enc_ctx->time_base = default_time_base;
3363 }
3364 
3365 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3366 {
3367  InputStream *ist = get_input_stream(ost);
3368  AVCodecContext *enc_ctx = ost->enc_ctx;
3369  AVCodecContext *dec_ctx = NULL;
3370  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3371  int j, ret;
3372 
3373  set_encoder_id(output_files[ost->file_index], ost);
3374 
3375  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3376  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3377  // which have to be filtered out to prevent leaking them to output files.
3378  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3379 
3380  if (ist) {
3381  ost->st->disposition = ist->st->disposition;
3382 
3383  dec_ctx = ist->dec_ctx;
3384  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3384 
3386  } else {
3387  for (j = 0; j < oc->nb_streams; j++) {
3388  AVStream *st = oc->streams[j];
3389  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3390  break;
3391  }
3392  if (j == oc->nb_streams)
3393  ost->st->disposition = ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ?
3394  AV_DISPOSITION_DEFAULT : 0;
3396  }
3397 
3398  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3399  if (!ost->frame_rate.num)
3400  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3401  if (ist && !ost->frame_rate.num)
3402  ost->frame_rate = ist->framerate;
3403  if (ist && !ost->frame_rate.num)
3404  ost->frame_rate = ist->st->r_frame_rate;
3405  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3406  ost->frame_rate = (AVRational){25, 1};
3407  av_log(NULL, AV_LOG_WARNING,
3408  "No information "
3409  "about the input framerate is available. Falling "
3410  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3411  "if you want a different framerate.\n",
3412  ost->file_index, ost->index);
3413  }
3414 
3415  if (ost->max_frame_rate.num &&
3416  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3417  !ost->frame_rate.den))
3418  ost->frame_rate = ost->max_frame_rate;
3419 
3420  if (ost->enc->supported_framerates && !ost->force_fps) {
3421  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3422  ost->frame_rate = ost->enc->supported_framerates[idx];
3423  }
3424  // reduce frame rate for mpeg4 to be within the spec limits
3425  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3426  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3427  ost->frame_rate.num, ost->frame_rate.den, 65535);
3428  }
3429  }
3430 
3431  switch (enc_ctx->codec_type) {
3432  case AVMEDIA_TYPE_AUDIO:
3433  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3434  if (dec_ctx)
3435  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3436  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3437  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3438  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3439  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3440 
3441  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3442  break;
3443 
3444  case AVMEDIA_TYPE_VIDEO:
3445  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3446 
3447  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3448  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3449  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3450  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3451  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3452  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3453  }
3454 
3455  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3456  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3457  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3458  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3459  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3460  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3461 
3462  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3463  if (dec_ctx)
3464  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3465  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3466 
3467  if (frame) {
3468  enc_ctx->color_range = frame->color_range;
3469  enc_ctx->color_primaries = frame->color_primaries;
3470  enc_ctx->color_trc = frame->color_trc;
3471  enc_ctx->colorspace = frame->colorspace;
3472  enc_ctx->chroma_sample_location = frame->chroma_location;
3473  }
3474 
3475  enc_ctx->framerate = ost->frame_rate;
3476 
3477  ost->st->avg_frame_rate = ost->frame_rate;
3478 
3479  if (!dec_ctx ||
3480  enc_ctx->width != dec_ctx->width ||
3481  enc_ctx->height != dec_ctx->height ||
3482  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3483  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3484  }
3485 
3486  // Field order: autodetection
3487  if (frame) {
3488  if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
3489  ost->top_field_first >= 0)
3490  frame->top_field_first = !!ost->top_field_first;
3491 
3492  if (frame->interlaced_frame) {
3493  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3494  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3495  else
3496  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3497  } else
3498  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3499  }
3500 
3501  // Field order: override
3502  if (ost->top_field_first == 0) {
3503  enc_ctx->field_order = AV_FIELD_BB;
3504  } else if (ost->top_field_first == 1) {
3505  enc_ctx->field_order = AV_FIELD_TT;
3506  }
3507 
3508  if (ost->forced_keyframes) {
3509  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3510  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3511  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3512  if (ret < 0) {
3513  av_log(NULL, AV_LOG_ERROR,
3514  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3515  return ret;
3516  }
3517  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3518  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3519  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3520  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3521 
3522  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3523  // parse it only for static kf timings
3524  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3525  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3526  }
3527  }
3528  break;
3529  case AVMEDIA_TYPE_SUBTITLE:
3530  enc_ctx->time_base = AV_TIME_BASE_Q;
3531  if (!enc_ctx->width) {
3532  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3533  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3534  }
3535  break;
3536  case AVMEDIA_TYPE_DATA:
3537  break;
3538  default:
3539  abort();
3540  break;
3541  }
3542 
3543  ost->mux_timebase = enc_ctx->time_base;
3544 
3545  return 0;
3546 }
3547 
3548 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3549  char *error, int error_len)
3550 {
3551  int ret = 0;
3552 
3553  if (ost->encoding_needed) {
3554  const AVCodec *codec = ost->enc;
3555  AVCodecContext *dec = NULL;
3556  InputStream *ist;
3557 
3558  ret = init_output_stream_encode(ost, frame);
3559  if (ret < 0)
3560  return ret;
3561 
3562  if ((ist = get_input_stream(ost)))
3563  dec = ist->dec_ctx;
3564  if (dec && dec->subtitle_header) {
3565  /* ASS code assumes this buffer is null terminated so add extra byte. */
3566  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3567  if (!ost->enc_ctx->subtitle_header)
3568  return AVERROR(ENOMEM);
3569  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3570  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3571  }
3572  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3573  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3574  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3575  !codec->defaults &&
3576  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3577  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3578  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3579 
3580  ret = hw_device_setup_for_encode(ost);
3581  if (ret < 0) {
3582  snprintf(error, error_len, "Device setup failed for "
3583  "encoder on output stream #%d:%d : %s",
3584  ost->file_index, ost->index, av_err2str(ret));
3585  return ret;
3586  }
3587 
3588  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3589  int input_props = 0, output_props = 0;
3590  AVCodecDescriptor const *input_descriptor =
3591  avcodec_descriptor_get(dec->codec_id);
3592  AVCodecDescriptor const *output_descriptor =
3593  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3594  if (input_descriptor)
3595  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3596  if (output_descriptor)
3597  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3598  if (input_props && output_props && input_props != output_props) {
3599  snprintf(error, error_len,
3600  "Subtitle encoding currently only possible from text to text "
3601  "or bitmap to bitmap");
3602  return AVERROR_INVALIDDATA;
3603  }
3604  }
3605 
3606  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3607  if (ret == AVERROR_EXPERIMENTAL)
3608  abort_codec_experimental(codec, 1);
3609  snprintf(error, error_len,
3610  "Error while opening encoder for output stream #%d:%d - "
3611  "maybe incorrect parameters such as bit_rate, rate, width or height",
3612  ost->file_index, ost->index);
3613  return ret;
3614  }
3615  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3616  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3617  av_buffersink_set_frame_size(ost->filter->filter,
3618  ost->enc_ctx->frame_size);
3619  assert_avoptions(ost->encoder_opts);
3620  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3621  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3622  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3623  " It takes bits/s as argument, not kbits/s\n");
3624 
3625  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3626  if (ret < 0) {
3627  av_log(NULL, AV_LOG_FATAL,
3628  "Error initializing the output stream codec context.\n");
3629  exit_program(1);
3630  }
3631 
3632  if (ost->enc_ctx->nb_coded_side_data) {
3633  int i;
3634 
3635  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3636  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3637  uint8_t *dst_data;
3638 
3639  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3640  if (!dst_data)
3641  return AVERROR(ENOMEM);
3642  memcpy(dst_data, sd_src->data, sd_src->size);
3643  }
3644  }
3645 
3646  /*
3647  * Add global input side data. For now this is naive, and copies it
3648  * from the input stream's global side data. All side data should
3649  * really be funneled over AVFrame and libavfilter, then added back to
3650  * packet side data, and then potentially using the first packet for
3651  * global side data.
3652  */
3653  if (ist) {
3654  int i;
3655  for (i = 0; i < ist->st->nb_side_data; i++) {
3656  AVPacketSideData *sd = &ist->st->side_data[i];
3657  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3658  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3659  if (!dst)
3660  return AVERROR(ENOMEM);
3661  memcpy(dst, sd->data, sd->size);
3662  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3663  av_display_rotation_set((uint32_t *)dst, 0);
3664  }
3665  }
3666  }
3667 
3668  // copy timebase while removing common factors
3669  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3670  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3671 
3672  // copy estimated duration as a hint to the muxer
3673  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3674  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3675  } else if (ost->stream_copy) {
3676  ret = init_output_stream_streamcopy(ost);
3677  if (ret < 0)
3678  return ret;
3679  }
3680 
3681  // parse user provided disposition, and update stream values
3682  if (ost->disposition) {
3683  static const AVOption opts[] = {
3684  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3685  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3686  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3687  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3688  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3689  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3690  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3691  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3692  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3693  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3694  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3695  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3696  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3697  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3698  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3699  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3700  { NULL },
3701  };
3702  static const AVClass class = {
3703  .class_name = "",
3704  .item_name = av_default_item_name,
3705  .option = opts,
3706  .version = LIBAVUTIL_VERSION_INT,
3707  };
3708  const AVClass *pclass = &class;
3709 
3710  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3711  if (ret < 0)
3712  return ret;
3713  }
3714 
3715  /* initialize bitstream filters for the output stream
3716  * needs to be done here, because the codec id for streamcopy is not
3717  * known until now */
3718  ret = init_output_bsfs(ost);
3719  if (ret < 0)
3720  return ret;
3721 
3722  ost->initialized = 1;
3723 
3724  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3725  if (ret < 0)
3726  return ret;
3727 
3728  return ret;
3729 }
3730 
3731 static void report_new_stream(int input_index, AVPacket *pkt)
3732 {
3733  InputFile *file = input_files[input_index];
3734  AVStream *st = file->ctx->streams[pkt->stream_index];
3735 
3736  if (pkt->stream_index < file->nb_streams_warn)
3737  return;
3738  av_log(file->ctx, AV_LOG_WARNING,
3739  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3740  av_get_media_type_string(st->codecpar->codec_type),
3741  input_index, pkt->stream_index,
3742  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3743  file->nb_streams_warn = pkt->stream_index + 1;
3744 }
3745 
3746 static int transcode_init(void)
3747 {
3748  int ret = 0, i, j, k;
3749  AVFormatContext *oc;
3750  OutputStream *ost;
3751  InputStream *ist;
3752  char error[1024] = {0};
3753 
3754  for (i = 0; i < nb_filtergraphs; i++) {
3755  FilterGraph *fg = filtergraphs[i];
3756  for (j = 0; j < fg->nb_outputs; j++) {
3757  OutputFilter *ofilter = fg->outputs[j];
3758  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3759  continue;
3760  if (fg->nb_inputs != 1)
3761  continue;
3762  for (k = nb_input_streams-1; k >= 0 ; k--)
3763  if (fg->inputs[0]->ist == input_streams[k])
3764  break;
3765  ofilter->ost->source_index = k;
3766  }
3767  }
3768 
3769  /* init framerate emulation */
3770  for (i = 0; i < nb_input_files; i++) {
3771  InputFile *ifile = input_files[i];
3772  if (ifile->readrate || ifile->rate_emu)
3773  for (j = 0; j < ifile->nb_streams; j++)
3774  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3775  }
3776 
3777  /* init input streams */
3778  for (i = 0; i < nb_input_streams; i++)
3779  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3780  for (i = 0; i < nb_output_streams; i++) {
3781  ost = output_streams[i];
3782  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
3783  }
3784  goto dump_format;
3785  }
3786 
3787  /*
3788  * initialize stream copy and subtitle/data streams.
3789  * Encoded AVFrame based streams will get initialized as follows:
3790  * - when the first AVFrame is received in do_video_out
3791  * - just before the first AVFrame is received in either transcode_step
3792  * or reap_filters due to us requiring the filter chain buffer sink
3793  * to be configured with the correct audio frame size, which is only
3794  * known after the encoder is initialized.
3795  */
3796  for (i = 0; i < nb_output_streams; i++) {
3797  if (!output_streams[i]->stream_copy &&
3798  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3799  output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3800  continue;
3801 
3802  ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3803  if (ret < 0)
3804  goto dump_format;
3805  }
3806 
3807  /* discard unused programs */
3808  for (i = 0; i < nb_input_files; i++) {
3809  InputFile *ifile = input_files[i];
3810  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3811  AVProgram *p = ifile->ctx->programs[j];
3812  int discard = AVDISCARD_ALL;
3813 
3814  for (k = 0; k < p->nb_stream_indexes; k++)
3815  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3816  discard = AVDISCARD_DEFAULT;
3817  break;
3818  }
3819  p->discard = discard;
3820  }
3821  }
3822 
3823  /* write headers for files with no streams */
3824  for (i = 0; i < nb_output_files; i++) {
3825  oc = output_files[i]->ctx;
3826  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3827  ret = check_init_output_file(output_files[i], i);
3828  if (ret < 0)
3829  goto dump_format;
3830  }
3831  }
3832 
3833  dump_format:
3834  /* dump the stream mapping */
3835  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3836  for (i = 0; i < nb_input_streams; i++) {
3837  ist = input_streams[i];
3838 
3839  for (j = 0; j < ist->nb_filters; j++) {
3840  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3841  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3842  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3843  ist->filters[j]->name);
3844  if (nb_filtergraphs > 1)
3845  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3846  av_log(NULL, AV_LOG_INFO, "\n");
3847  }
3848  }
3849  }
3850 
3851  for (i = 0; i < nb_output_streams; i++) {
3852  ost = output_streams[i];
3853 
3854  if (ost->attachment_filename) {
3855  /* an attached file */
3856  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3857  ost->attachment_filename, ost->file_index, ost->index);
3858  continue;
3859  }
3860 
3861  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3862  /* output from a complex graph */
3863  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3864  if (nb_filtergraphs > 1)
3865  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3866 
3867  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3868  ost->index, ost->enc ? ost->enc->name : "?");
3869  continue;
3870  }
3871 
3872  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3873  input_streams[ost->source_index]->file_index,
3874  input_streams[ost->source_index]->st->index,
3875  ost->file_index,
3876  ost->index);
3877  if (ost->sync_ist != input_streams[ost->source_index])
3878  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3879  ost->sync_ist->file_index,
3880  ost->sync_ist->st->index);
3881  if (ost->stream_copy)
3882  av_log(NULL, AV_LOG_INFO, " (copy)");
3883  else {
3884  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3885  const AVCodec *out_codec = ost->enc;
3886  const char *decoder_name = "?";
3887  const char *in_codec_name = "?";
3888  const char *encoder_name = "?";
3889  const char *out_codec_name = "?";
3890  const AVCodecDescriptor *desc;
3891 
3892  if (in_codec) {
3893  decoder_name = in_codec->name;
3894  desc = avcodec_descriptor_get(in_codec->id);
3895  if (desc)
3896  in_codec_name = desc->name;
3897  if (!strcmp(decoder_name, in_codec_name))
3898  decoder_name = "native";
3899  }
3900 
3901  if (out_codec) {
3902  encoder_name = out_codec->name;
3903  desc = avcodec_descriptor_get(out_codec->id);
3904  if (desc)
3905  out_codec_name = desc->name;
3906  if (!strcmp(encoder_name, out_codec_name))
3907  encoder_name = "native";
3908  }
3909 
3910  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3911  in_codec_name, decoder_name,
3912  out_codec_name, encoder_name);
3913  }
3914  av_log(NULL, AV_LOG_INFO, "\n");
3915  }
3916 
3917  if (ret) {
3918  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3919  return ret;
3920  }
3921 
3922  atomic_store(&transcode_init_done, 1);
3923 
3924  return 0;
3925 }
3926 
3927 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3928 static int need_output(void)
3929 {
3930  int i;
3931 
3932  for (i = 0; i < nb_output_streams; i++) {
3933  OutputStream *ost = output_streams[i];
3934  OutputFile *of = output_files[ost->file_index];
3935  AVFormatContext *os = output_files[ost->file_index]->ctx;
3936 
3937  if (ost->finished ||
3938  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3939  continue;
3940  if (ost->frame_number >= ost->max_frames) {
3941  int j;
3942  for (j = 0; j < of->ctx->nb_streams; j++)
3943  close_output_stream(output_streams[of->ost_index + j]);
3944  continue;
3945  }
3946 
3947  return 1;
3948  }
3949 
3950  return 0;
3951 }
3952 
3953 /**
3954  * Select the output stream to process.
3955  *
3956  * @return selected output stream, or NULL if none available
3957  */
3958 static OutputStream *choose_output(void)
3959 {
3960  int i;
3961  int64_t opts_min = INT64_MAX;
3962  OutputStream *ost_min = NULL;
3963 
3964  for (i = 0; i < nb_output_streams; i++) {
3965  OutputStream *ost = output_streams[i];
3966  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3967  av_rescale_q(ost->last_mux_dts, ost->st->time_base,
3968  AV_TIME_BASE_Q);
3969  if (ost->last_mux_dts == AV_NOPTS_VALUE)
3970  av_log(NULL, AV_LOG_DEBUG,
3971  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3972  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3973 
3974  if (!ost->initialized && !ost->inputs_done)
3975  return ost->unavailable ? NULL : ost;
3976 
3977  if (!ost->finished && opts < opts_min) {
3978  opts_min = opts;
3979  ost_min = ost->unavailable ? NULL : ost;
3980  }
3981  }
3982  return ost_min;
3983 }
3984 
3985 static void set_tty_echo(int on)
3986 {
3987 #if HAVE_TERMIOS_H
3988  struct termios tty;
3989  if (tcgetattr(0, &tty) == 0) {
3990  if (on) tty.c_lflag |= ECHO;
3991  else tty.c_lflag &= ~ECHO;
3992  tcsetattr(0, TCSANOW, &tty);
3993  }
3994 #endif
3995 }
3996 
3997 static int check_keyboard_interaction(int64_t cur_time)
3998 {
3999  int i, ret, key;
4000  static int64_t last_time;
4001  if (received_nb_signals)
4002  return AVERROR_EXIT;
4003  /* read_key() returns 0 on EOF */
4004  if(cur_time - last_time >= 100000 && !run_as_daemon){
4005  key = read_key();
4006  last_time = cur_time;
4007  }else
4008  key = -1;
4009  if (key == 'q')
4010  return AVERROR_EXIT;
4011  if (key == '+') av_log_set_level(av_log_get_level()+10);
4012  if (key == '-') av_log_set_level(av_log_get_level()-10);
4013  if (key == 's') qp_hist ^= 1;
4014  if (key == 'h'){
4015  if (do_hex_dump){
4016  do_hex_dump = do_pkt_dump = 0;
4017  } else if(do_pkt_dump){
4018  do_hex_dump = 1;
4019  } else
4020  do_pkt_dump = 1;
4021  av_log_set_level(AV_LOG_DEBUG);
4022  }
4023  if (key == 'c' || key == 'C'){
4024  char buf[4096], target[64], command[256], arg[256] = {0};
4025  double time;
4026  int k, n = 0;
4027  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4028  i = 0;
4029  set_tty_echo(1);
4030  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4031  if (k > 0)
4032  buf[i++] = k;
4033  buf[i] = 0;
4034  set_tty_echo(0);
4035  fprintf(stderr, "\n");
4036  if (k > 0 &&
4037  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4038  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4039  target, time, command, arg);
4040  for (i = 0; i < nb_filtergraphs; i++) {
4041  FilterGraph *fg = filtergraphs[i];
4042  if (fg->graph) {
4043  if (time < 0) {
4044  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4045  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4046  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4047  } else if (key == 'c') {
4048  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4049  ret = AVERROR_PATCHWELCOME;
4050  } else {
4051  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4052  if (ret < 0)
4053  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4054  }
4055  }
4056  }
4057  } else {
4058  av_log(NULL, AV_LOG_ERROR,
4059  "Parse error, at least 3 arguments were expected, "
4060  "only %d given in string '%s'\n", n, buf);
4061  }
4062  }
4063  if (key == 'd' || key == 'D'){
4064  int debug=0;
4065  if(key == 'D') {
4066  debug = input_streams[0]->dec_ctx->debug << 1;
4067  if(!debug) debug = 1;
4068  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4069  debug +=