FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
/* NULL-terminated list of constant names ("n", "t", forced-frame history, ...)
 * — presumably the variables available to the -force_key_frames expression
 * evaluator; TODO confirm against the av_expr usage elsewhere in this file. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
/* getutime(): per-process CPU/user time in microseconds (used by
 * update_benchmark()); getmaxrss(): peak resident set size (do_benchmark). */
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 
/* NOTE(review): set from an option presumably; no use visible in this chunk. */
128 static int run_as_daemon = 0;
/* Count of duplicated frames emitted by do_video_out() for CFR sync. */
129 static int nb_frames_dup = 0;
/* Warn once each time nb_frames_dup crosses this threshold; the threshold is
 * then multiplied by 10 (see do_video_out). */
130 static unsigned dup_warning = 1000;
/* Count of frames dropped by do_video_out() rate conversion. */
131 static int nb_frames_drop = 0;
/* [0] / [1] split — assumed success/error decode counters; verify at use site. */
132 static int64_t decode_error_stat[2];
133 
/* NOTE(review): SDP generation flag presumably; no use visible in this chunk. */
134 static int want_sdp = 1;
135 
/* Timestamp of the previous update_benchmark() call, in microseconds as
 * returned by getutime(). Must be int64_t: update_benchmark() stores the
 * full 64-bit microsecond clock here and prints (t - current_time) with
 * PRIu64, so a plain int would truncate and produce garbage deltas. */
static int64_t current_time;
138 
140 
145 
150 
153 
154 #if HAVE_TERMIOS_H
155 
156 /* init terminal so that we can grab keys */
157 static struct termios oldtty;
158 static int restore_tty;
159 #endif
160 
161 #if HAVE_THREADS
162 static void free_input_threads(void);
163 #endif
164 
165 /* sub2video hack:
166  Convert subtitles to video with alpha to insert them in filter graphs.
167  This is a temporary solution until libavfilter gets real subtitles support.
168  */
169 
171 {
172  int ret;
173  AVFrame *frame = ist->sub2video.frame;
174 
175  av_frame_unref(frame);
176  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
179  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180  return ret;
181  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
182  return 0;
183 }
184 
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186  AVSubtitleRect *r)
187 {
188  uint32_t *pal, *dst2;
189  uint8_t *src, *src2;
190  int x, y;
191 
192  if (r->type != SUBTITLE_BITMAP) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194  return;
195  }
196  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198  r->x, r->y, r->w, r->h, w, h
199  );
200  return;
201  }
202 
203  dst += r->y * dst_linesize + r->x * 4;
204  src = r->data[0];
205  pal = (uint32_t *)r->data[1];
206  for (y = 0; y < r->h; y++) {
207  dst2 = (uint32_t *)dst;
208  src2 = src;
209  for (x = 0; x < r->w; x++)
210  *(dst2++) = pal[*(src2++)];
211  dst += dst_linesize;
212  src += r->linesize[0];
213  }
214 }
215 
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 {
218  AVFrame *frame = ist->sub2video.frame;
219  int i;
220  int ret;
221 
222  av_assert1(frame->data[0]);
223  ist->sub2video.last_pts = frame->pts = pts;
224  for (i = 0; i < ist->nb_filters; i++) {
225  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228  if (ret != AVERROR_EOF && ret < 0)
229  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
230  av_err2str(ret));
231  }
232 }
233 
235 {
236  AVFrame *frame = ist->sub2video.frame;
237  int8_t *dst;
238  int dst_linesize;
239  int num_rects, i;
240  int64_t pts, end_pts;
241 
242  if (!frame)
243  return;
244  if (sub) {
245  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
248  AV_TIME_BASE_Q, ist->st->time_base);
249  num_rects = sub->num_rects;
250  } else {
251  pts = ist->sub2video.end_pts;
252  end_pts = INT64_MAX;
253  num_rects = 0;
254  }
255  if (sub2video_get_blank_frame(ist) < 0) {
257  "Impossible to get a blank canvas.\n");
258  return;
259  }
260  dst = frame->data [0];
261  dst_linesize = frame->linesize[0];
262  for (i = 0; i < num_rects; i++)
263  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
264  sub2video_push_ref(ist, pts);
265  ist->sub2video.end_pts = end_pts;
266 }
267 
268 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
269 {
270  InputFile *infile = input_files[ist->file_index];
271  int i, j, nb_reqs;
272  int64_t pts2;
273 
274  /* When a frame is read from a file, examine all sub2video streams in
275  the same file and send the sub2video frame again. Otherwise, decoded
276  video frames could be accumulating in the filter graph while a filter
277  (possibly overlay) is desperately waiting for a subtitle frame. */
278  for (i = 0; i < infile->nb_streams; i++) {
279  InputStream *ist2 = input_streams[infile->ist_index + i];
280  if (!ist2->sub2video.frame)
281  continue;
282  /* subtitles seem to be usually muxed ahead of other streams;
283  if not, subtracting a larger time here is necessary */
284  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
285  /* do not send the heartbeat frame if the subtitle is already ahead */
286  if (pts2 <= ist2->sub2video.last_pts)
287  continue;
288  if (pts2 >= ist2->sub2video.end_pts ||
289  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
290  sub2video_update(ist2, NULL);
291  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
292  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
293  if (nb_reqs)
294  sub2video_push_ref(ist2, pts2);
295  }
296 }
297 
298 static void sub2video_flush(InputStream *ist)
299 {
300  int i;
301  int ret;
302 
303  if (ist->sub2video.end_pts < INT64_MAX)
304  sub2video_update(ist, NULL);
305  for (i = 0; i < ist->nb_filters; i++) {
306  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
307  if (ret != AVERROR_EOF && ret < 0)
308  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
309  }
310 }
311 
312 /* end of sub2video hack */
313 
/* Restore the saved terminal attributes if term_init() changed them.
 * Kept minimal — tcsetattr only — so it is safe to call from a signal
 * handler (hence the _sigsafe suffix). */
314 static void term_exit_sigsafe(void)
315 {
316 #if HAVE_TERMIOS_H
317  if(restore_tty)
318  tcsetattr (0, TCSANOW, &oldtty)
319 #endif
320 }
321 
322 void term_exit(void)
323 {
324  av_log(NULL, AV_LOG_QUIET, "%s", "");
326 }
327 
/* Last termination signal received (0 = none); set by sigterm_handler. */
328 static volatile int received_sigterm = 0;
/* How many termination signals arrived; > 3 triggers a hard exit(123). */
329 static volatile int received_nb_signals = 0;
/* Set at the end of ffmpeg_cleanup(); polled by the Windows CtrlHandler to
 * stall until shutdown completes. */
331 static volatile int ffmpeg_exited = 0;
/* Process exit status; forced to 1 when av_interleaved_write_frame fails. */
332 static int main_return_code = 0;
333 
334 static void
336 {
337  int ret;
338  received_sigterm = sig;
341  if(received_nb_signals > 3) {
342  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
343  strlen("Received > 3 system signals, hard exiting\n"));
344  if (ret < 0) { /* Do nothing */ };
345  exit(123);
346  }
347 }
348 
349 #if HAVE_SETCONSOLECTRLHANDLER
350 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
351 {
352  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
353 
354  switch (fdwCtrlType)
355  {
356  case CTRL_C_EVENT:
357  case CTRL_BREAK_EVENT:
358  sigterm_handler(SIGINT);
359  return TRUE;
360 
361  case CTRL_CLOSE_EVENT:
362  case CTRL_LOGOFF_EVENT:
363  case CTRL_SHUTDOWN_EVENT:
364  sigterm_handler(SIGTERM);
365  /* Basically, with these 3 events, when we return from this method the
366  process is hard terminated, so stall as long as we need to
367  to try and let the main thread(s) clean up and gracefully terminate
368  (we have at most 5 seconds, but should be done far before that). */
369  while (!ffmpeg_exited) {
370  Sleep(0);
371  }
372  return TRUE;
373 
374  default:
375  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376  return FALSE;
377  }
378 }
379 #endif
380 
381 void term_init(void)
382 {
383 #if HAVE_TERMIOS_H
385  struct termios tty;
386  if (tcgetattr (0, &tty) == 0) {
387  oldtty = tty;
388  restore_tty = 1;
389 
390  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
391  |INLCR|IGNCR|ICRNL|IXON);
392  tty.c_oflag |= OPOST;
393  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
394  tty.c_cflag &= ~(CSIZE|PARENB);
395  tty.c_cflag |= CS8;
396  tty.c_cc[VMIN] = 1;
397  tty.c_cc[VTIME] = 0;
398 
399  tcsetattr (0, TCSANOW, &tty);
400  }
401  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
402  }
403 #endif
404 
405  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
406  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
407 #ifdef SIGXCPU
408  signal(SIGXCPU, sigterm_handler);
409 #endif
410 #ifdef SIGPIPE
411  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
412 #endif
413 #if HAVE_SETCONSOLECTRLHANDLER
414  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
415 #endif
416 }
417 
418 /* read a key without blocking */
/* Poll stdin for a single key press without blocking.
 * Returns the character read, or -1 when no input is available (also 0 or
 * a negative read() result on the termios path when read fails). */
419 static int read_key(void)
420 {
421  unsigned char ch;
422 #if HAVE_TERMIOS_H
423  int n = 1;
424  struct timeval tv;
425  fd_set rfds;
426 
/* Zero-timeout select(): check fd 0 for readability without blocking. */
427  FD_ZERO(&rfds);
428  FD_SET(0, &rfds);
429  tv.tv_sec = 0;
430  tv.tv_usec = 0;
431  n = select(1, &rfds, NULL, NULL, &tv);
432  if (n > 0) {
433  n = read(0, &ch, 1);
434  if (n == 1)
435  return ch;
436 
437  return n;
438  }
439 #elif HAVE_KBHIT
440 # if HAVE_PEEKNAMEDPIPE
/* Lazily detect whether stdin is a console or a pipe (GUI front-ends). */
441  static int is_pipe;
442  static HANDLE input_handle;
443  DWORD dw, nchars;
444  if(!input_handle){
445  input_handle = GetStdHandle(STD_INPUT_HANDLE);
446  is_pipe = !GetConsoleMode(input_handle, &dw);
447  }
448 
449  if (is_pipe) {
450  /* When running under a GUI, you will end here. */
451  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
452  // input pipe may have been closed by the program that ran ffmpeg
453  return -1;
454  }
455  //Read it
456  if(nchars != 0) {
457  read(0, &ch, 1);
458  return ch;
459  }else{
460  return -1;
461  }
462  }
463 # endif
/* Console path: kbhit()/getch() from conio.h. */
464  if(kbhit())
465  return(getch());
466 #endif
467  return -1;
468 }
469 
470 static int decode_interrupt_cb(void *ctx)
471 {
473 }
474 
476 
477 static void ffmpeg_cleanup(int ret)
478 {
479  int i, j;
480 
481  if (do_benchmark) {
482  int maxrss = getmaxrss() / 1024;
483  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
484  }
485 
486  for (i = 0; i < nb_filtergraphs; i++) {
487  FilterGraph *fg = filtergraphs[i];
489  for (j = 0; j < fg->nb_inputs; j++) {
490  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
491  AVFrame *frame;
492  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
493  sizeof(frame), NULL);
494  av_frame_free(&frame);
495  }
496  av_fifo_freep(&fg->inputs[j]->frame_queue);
497  if (fg->inputs[j]->ist->sub2video.sub_queue) {
498  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
499  AVSubtitle sub;
501  &sub, sizeof(sub), NULL);
502  avsubtitle_free(&sub);
503  }
505  }
507  av_freep(&fg->inputs[j]->name);
508  av_freep(&fg->inputs[j]);
509  }
510  av_freep(&fg->inputs);
511  for (j = 0; j < fg->nb_outputs; j++) {
512  av_freep(&fg->outputs[j]->name);
513  av_freep(&fg->outputs[j]->formats);
514  av_freep(&fg->outputs[j]->channel_layouts);
515  av_freep(&fg->outputs[j]->sample_rates);
516  av_freep(&fg->outputs[j]);
517  }
518  av_freep(&fg->outputs);
519  av_freep(&fg->graph_desc);
520 
521  av_freep(&filtergraphs[i]);
522  }
523  av_freep(&filtergraphs);
524 
526 
527  /* close files */
528  for (i = 0; i < nb_output_files; i++) {
529  OutputFile *of = output_files[i];
531  if (!of)
532  continue;
533  s = of->ctx;
534  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
535  avio_closep(&s->pb);
537  av_dict_free(&of->opts);
538 
539  av_freep(&output_files[i]);
540  }
541  for (i = 0; i < nb_output_streams; i++) {
542  OutputStream *ost = output_streams[i];
543 
544  if (!ost)
545  continue;
546 
547  for (j = 0; j < ost->nb_bitstream_filters; j++)
548  av_bsf_free(&ost->bsf_ctx[j]);
549  av_freep(&ost->bsf_ctx);
550 
552  av_frame_free(&ost->last_frame);
553  av_dict_free(&ost->encoder_opts);
554 
555  av_freep(&ost->forced_keyframes);
557  av_freep(&ost->avfilter);
558  av_freep(&ost->logfile_prefix);
559 
561  ost->audio_channels_mapped = 0;
562 
563  av_dict_free(&ost->sws_dict);
564 
567 
568  if (ost->muxing_queue) {
569  while (av_fifo_size(ost->muxing_queue)) {
570  AVPacket pkt;
571  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
572  av_packet_unref(&pkt);
573  }
575  }
576 
577  av_freep(&output_streams[i]);
578  }
579 #if HAVE_THREADS
580  free_input_threads();
581 #endif
582  for (i = 0; i < nb_input_files; i++) {
583  avformat_close_input(&input_files[i]->ctx);
584  av_freep(&input_files[i]);
585  }
586  for (i = 0; i < nb_input_streams; i++) {
587  InputStream *ist = input_streams[i];
588 
591  av_dict_free(&ist->decoder_opts);
594  av_freep(&ist->filters);
595  av_freep(&ist->hwaccel_device);
596  av_freep(&ist->dts_buffer);
597 
599 
600  av_freep(&input_streams[i]);
601  }
602 
603  if (vstats_file) {
604  if (fclose(vstats_file))
606  "Error closing vstats file, loss of information possible: %s\n",
607  av_err2str(AVERROR(errno)));
608  }
610 
611  av_freep(&input_streams);
612  av_freep(&input_files);
613  av_freep(&output_streams);
614  av_freep(&output_files);
615 
616  uninit_opts();
617 
619 
620  if (received_sigterm) {
621  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
622  (int) received_sigterm);
623  } else if (ret && atomic_load(&transcode_init_done)) {
624  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
625  }
626  term_exit();
627  ffmpeg_exited = 1;
628 }
629 
631 {
632  AVDictionaryEntry *t = NULL;
633 
634  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
636  }
637 }
638 
640 {
642  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
643  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
644  exit_program(1);
645  }
646 }
647 
648 static void abort_codec_experimental(AVCodec *c, int encoder)
649 {
650  exit_program(1);
651 }
652 
653 static void update_benchmark(const char *fmt, ...)
654 {
655  if (do_benchmark_all) {
656  int64_t t = getutime();
657  va_list va;
658  char buf[1024];
659 
660  if (fmt) {
661  va_start(va, fmt);
662  vsnprintf(buf, sizeof(buf), fmt, va);
663  va_end(va);
664  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
665  }
666  current_time = t;
667  }
668 }
669 
671 {
672  int i;
673  for (i = 0; i < nb_output_streams; i++) {
674  OutputStream *ost2 = output_streams[i];
675  ost2->finished |= ost == ost2 ? this_stream : others;
676  }
677 }
678 
679 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
680 {
681  AVFormatContext *s = of->ctx;
682  AVStream *st = ost->st;
683  int ret;
684 
685  /*
686  * Audio encoders may split the packets -- #frames in != #packets out.
687  * But there is no reordering, so we can limit the number of output packets
688  * by simply dropping them here.
689  * Counting encoded video frames needs to be done separately because of
690  * reordering, see do_video_out().
691  * Do not count the packet when unqueued because it has been counted when queued.
692  */
693  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
694  if (ost->frame_number >= ost->max_frames) {
695  av_packet_unref(pkt);
696  return;
697  }
698  ost->frame_number++;
699  }
700 
701  if (!of->header_written) {
702  AVPacket tmp_pkt = {0};
703  /* the muxer is not initialized yet, buffer the packet */
704  if (!av_fifo_space(ost->muxing_queue)) {
705  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
706  ost->max_muxing_queue_size);
707  if (new_size <= av_fifo_size(ost->muxing_queue)) {
709  "Too many packets buffered for output stream %d:%d.\n",
710  ost->file_index, ost->st->index);
711  exit_program(1);
712  }
713  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
714  if (ret < 0)
715  exit_program(1);
716  }
717  ret = av_packet_ref(&tmp_pkt, pkt);
718  if (ret < 0)
719  exit_program(1);
720  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
721  av_packet_unref(pkt);
722  return;
723  }
724 
727  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
728 
729  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
730  int i;
732  NULL);
733  ost->quality = sd ? AV_RL32(sd) : -1;
734  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
735 
736  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
737  if (sd && i < sd[5])
738  ost->error[i] = AV_RL64(sd + 8 + 8*i);
739  else
740  ost->error[i] = -1;
741  }
742 
743  if (ost->frame_rate.num && ost->is_cfr) {
744  if (pkt->duration > 0)
745  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
746  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
747  ost->mux_timebase);
748  }
749  }
750 
751  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
752 
753  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
754  if (pkt->dts != AV_NOPTS_VALUE &&
755  pkt->pts != AV_NOPTS_VALUE &&
756  pkt->dts > pkt->pts) {
757  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
758  pkt->dts, pkt->pts,
759  ost->file_index, ost->st->index);
760  pkt->pts =
761  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
762  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
763  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
764  }
766  pkt->dts != AV_NOPTS_VALUE &&
767  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
768  ost->last_mux_dts != AV_NOPTS_VALUE) {
769  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
770  if (pkt->dts < max) {
771  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
772  av_log(s, loglevel, "Non-monotonous DTS in output stream "
773  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
774  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
775  if (exit_on_error) {
776  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
777  exit_program(1);
778  }
779  av_log(s, loglevel, "changing to %"PRId64". This may result "
780  "in incorrect timestamps in the output file.\n",
781  max);
782  if (pkt->pts >= pkt->dts)
783  pkt->pts = FFMAX(pkt->pts, max);
784  pkt->dts = max;
785  }
786  }
787  }
788  ost->last_mux_dts = pkt->dts;
789 
790  ost->data_size += pkt->size;
791  ost->packets_written++;
792 
793  pkt->stream_index = ost->index;
794 
795  if (debug_ts) {
796  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
797  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
799  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
800  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
801  pkt->size
802  );
803  }
804 
805  ret = av_interleaved_write_frame(s, pkt);
806  if (ret < 0) {
807  print_error("av_interleaved_write_frame()", ret);
808  main_return_code = 1;
810  }
811  av_packet_unref(pkt);
812 }
813 
815 {
816  OutputFile *of = output_files[ost->file_index];
817 
818  ost->finished |= ENCODER_FINISHED;
819  if (of->shortest) {
820  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
821  of->recording_time = FFMIN(of->recording_time, end);
822  }
823 }
824 
825 /*
826  * Send a single packet to the output, applying any bitstream filters
827  * associated with the output stream. This may result in any number
828  * of packets actually being written, depending on what bitstream
829  * filters are applied. The supplied packet is consumed and will be
830  * blank (as if newly-allocated) when this function returns.
831  *
832  * If eof is set, instead indicate EOF to all bitstream filters and
833  * therefore flush any delayed packets to the output. A blank packet
834  * must be supplied in this case.
835  */
837  OutputStream *ost, int eof)
838 {
839  int ret = 0;
840 
841  /* apply the output bitstream filters, if any */
842  if (ost->nb_bitstream_filters) {
843  int idx;
844 
845  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
846  if (ret < 0)
847  goto finish;
848 
849  eof = 0;
850  idx = 1;
851  while (idx) {
852  /* get a packet from the previous filter up the chain */
853  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
854  if (ret == AVERROR(EAGAIN)) {
855  ret = 0;
856  idx--;
857  continue;
858  } else if (ret == AVERROR_EOF) {
859  eof = 1;
860  } else if (ret < 0)
861  goto finish;
862 
863  /* send it to the next filter down the chain or to the muxer */
864  if (idx < ost->nb_bitstream_filters) {
865  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
866  if (ret < 0)
867  goto finish;
868  idx++;
869  eof = 0;
870  } else if (eof)
871  goto finish;
872  else
873  write_packet(of, pkt, ost, 0);
874  }
875  } else if (!eof)
876  write_packet(of, pkt, ost, 0);
877 
878 finish:
879  if (ret < 0 && ret != AVERROR_EOF) {
880  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
882  if(exit_on_error)
883  exit_program(1);
884  }
885 }
886 
888 {
889  OutputFile *of = output_files[ost->file_index];
890 
891  if (of->recording_time != INT64_MAX &&
893  AV_TIME_BASE_Q) >= 0) {
894  close_output_stream(ost);
895  return 0;
896  }
897  return 1;
898 }
899 
901  AVFrame *frame)
902 {
903  AVCodecContext *enc = ost->enc_ctx;
904  AVPacket pkt;
905  int ret;
906 
907  av_init_packet(&pkt);
908  pkt.data = NULL;
909  pkt.size = 0;
910 
911  if (!check_recording_time(ost))
912  return;
913 
914  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
915  frame->pts = ost->sync_opts;
916  ost->sync_opts = frame->pts + frame->nb_samples;
917  ost->samples_encoded += frame->nb_samples;
918  ost->frames_encoded++;
919 
920  av_assert0(pkt.size || !pkt.data);
922  if (debug_ts) {
923  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
924  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
925  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926  enc->time_base.num, enc->time_base.den);
927  }
928 
929  ret = avcodec_send_frame(enc, frame);
930  if (ret < 0)
931  goto error;
932 
933  while (1) {
934  ret = avcodec_receive_packet(enc, &pkt);
935  if (ret == AVERROR(EAGAIN))
936  break;
937  if (ret < 0)
938  goto error;
939 
940  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
941 
942  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
943 
944  if (debug_ts) {
945  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
946  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
947  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
948  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
949  }
950 
951  output_packet(of, &pkt, ost, 0);
952  }
953 
954  return;
955 error:
956  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
957  exit_program(1);
958 }
959 
960 static void do_subtitle_out(OutputFile *of,
961  OutputStream *ost,
962  AVSubtitle *sub)
963 {
964  int subtitle_out_max_size = 1024 * 1024;
965  int subtitle_out_size, nb, i;
966  AVCodecContext *enc;
967  AVPacket pkt;
968  int64_t pts;
969 
970  if (sub->pts == AV_NOPTS_VALUE) {
971  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972  if (exit_on_error)
973  exit_program(1);
974  return;
975  }
976 
977  enc = ost->enc_ctx;
978 
979  if (!subtitle_out) {
980  subtitle_out = av_malloc(subtitle_out_max_size);
981  if (!subtitle_out) {
982  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
983  exit_program(1);
984  }
985  }
986 
987  /* Note: DVB subtitle need one packet to draw them and one other
988  packet to clear them */
989  /* XXX: signal it in the codec context ? */
991  nb = 2;
992  else
993  nb = 1;
994 
995  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
996  pts = sub->pts;
997  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
998  pts -= output_files[ost->file_index]->start_time;
999  for (i = 0; i < nb; i++) {
1000  unsigned save_num_rects = sub->num_rects;
1001 
1002  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1003  if (!check_recording_time(ost))
1004  return;
1005 
1006  sub->pts = pts;
1007  // start_display_time is required to be 0
1008  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1009  sub->end_display_time -= sub->start_display_time;
1010  sub->start_display_time = 0;
1011  if (i == 1)
1012  sub->num_rects = 0;
1013 
1014  ost->frames_encoded++;
1015 
1016  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1017  subtitle_out_max_size, sub);
1018  if (i == 1)
1019  sub->num_rects = save_num_rects;
1020  if (subtitle_out_size < 0) {
1021  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1022  exit_program(1);
1023  }
1024 
1025  av_init_packet(&pkt);
1026  pkt.data = subtitle_out;
1027  pkt.size = subtitle_out_size;
1028  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1029  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1031  /* XXX: the pts correction is handled here. Maybe handling
1032  it in the codec would be better */
1033  if (i == 0)
1034  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1035  else
1036  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1037  }
1038  pkt.dts = pkt.pts;
1039  output_packet(of, &pkt, ost, 0);
1040  }
1041 }
1042 
1043 static void do_video_out(OutputFile *of,
1044  OutputStream *ost,
1045  AVFrame *next_picture,
1046  double sync_ipts)
1047 {
1048  int ret, format_video_sync;
1049  AVPacket pkt;
1050  AVCodecContext *enc = ost->enc_ctx;
1051  AVCodecParameters *mux_par = ost->st->codecpar;
1052  AVRational frame_rate;
1053  int nb_frames, nb0_frames, i;
1054  double delta, delta0;
1055  double duration = 0;
1056  int frame_size = 0;
1057  InputStream *ist = NULL;
1059 
1060  if (ost->source_index >= 0)
1061  ist = input_streams[ost->source_index];
1062 
1063  frame_rate = av_buffersink_get_frame_rate(filter);
1064  if (frame_rate.num > 0 && frame_rate.den > 0)
1065  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1066 
1067  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1068  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1069 
1070  if (!ost->filters_script &&
1071  !ost->filters &&
1072  next_picture &&
1073  ist &&
1074  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1075  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1076  }
1077 
1078  if (!next_picture) {
1079  //end, flushing
1080  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1081  ost->last_nb0_frames[1],
1082  ost->last_nb0_frames[2]);
1083  } else {
1084  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1085  delta = delta0 + duration;
1086 
1087  /* by default, we output a single frame */
1088  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1089  nb_frames = 1;
1090 
1091  format_video_sync = video_sync_method;
1092  if (format_video_sync == VSYNC_AUTO) {
1093  if(!strcmp(of->ctx->oformat->name, "avi")) {
1094  format_video_sync = VSYNC_VFR;
1095  } else
1096  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1097  if ( ist
1098  && format_video_sync == VSYNC_CFR
1099  && input_files[ist->file_index]->ctx->nb_streams == 1
1100  && input_files[ist->file_index]->input_ts_offset == 0) {
1101  format_video_sync = VSYNC_VSCFR;
1102  }
1103  if (format_video_sync == VSYNC_CFR && copy_ts) {
1104  format_video_sync = VSYNC_VSCFR;
1105  }
1106  }
1107  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1108 
1109  if (delta0 < 0 &&
1110  delta > 0 &&
1111  format_video_sync != VSYNC_PASSTHROUGH &&
1112  format_video_sync != VSYNC_DROP) {
1113  if (delta0 < -0.6) {
1114  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1115  } else
1116  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1117  sync_ipts = ost->sync_opts;
1118  duration += delta0;
1119  delta0 = 0;
1120  }
1121 
1122  switch (format_video_sync) {
1123  case VSYNC_VSCFR:
1124  if (ost->frame_number == 0 && delta0 >= 0.5) {
1125  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1126  delta = duration;
1127  delta0 = 0;
1128  ost->sync_opts = lrint(sync_ipts);
1129  }
1130  case VSYNC_CFR:
1131  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1132  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1133  nb_frames = 0;
1134  } else if (delta < -1.1)
1135  nb_frames = 0;
1136  else if (delta > 1.1) {
1137  nb_frames = lrintf(delta);
1138  if (delta0 > 1.1)
1139  nb0_frames = lrintf(delta0 - 0.6);
1140  }
1141  break;
1142  case VSYNC_VFR:
1143  if (delta <= -0.6)
1144  nb_frames = 0;
1145  else if (delta > 0.6)
1146  ost->sync_opts = lrint(sync_ipts);
1147  break;
1148  case VSYNC_DROP:
1149  case VSYNC_PASSTHROUGH:
1150  ost->sync_opts = lrint(sync_ipts);
1151  break;
1152  default:
1153  av_assert0(0);
1154  }
1155  }
1156 
1157  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1158  nb0_frames = FFMIN(nb0_frames, nb_frames);
1159 
1160  memmove(ost->last_nb0_frames + 1,
1161  ost->last_nb0_frames,
1162  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1163  ost->last_nb0_frames[0] = nb0_frames;
1164 
1165  if (nb0_frames == 0 && ost->last_dropped) {
1166  nb_frames_drop++;
1168  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1169  ost->frame_number, ost->st->index, ost->last_frame->pts);
1170  }
1171  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1172  if (nb_frames > dts_error_threshold * 30) {
1173  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1174  nb_frames_drop++;
1175  return;
1176  }
1177  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1178  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1179  if (nb_frames_dup > dup_warning) {
1180  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1181  dup_warning *= 10;
1182  }
1183  }
1184  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1185 
1186  /* duplicates frame if needed */
1187  for (i = 0; i < nb_frames; i++) {
1188  AVFrame *in_picture;
1189  av_init_packet(&pkt);
1190  pkt.data = NULL;
1191  pkt.size = 0;
1192 
1193  if (i < nb0_frames && ost->last_frame) {
1194  in_picture = ost->last_frame;
1195  } else
1196  in_picture = next_picture;
1197 
1198  if (!in_picture)
1199  return;
1200 
1201  in_picture->pts = ost->sync_opts;
1202 
1203 #if 1
1204  if (!check_recording_time(ost))
1205 #else
1206  if (ost->frame_number >= ost->max_frames)
1207 #endif
1208  return;
1209 
1210  {
1211  int forced_keyframe = 0;
1212  double pts_time;
1213 
1215  ost->top_field_first >= 0)
1216  in_picture->top_field_first = !!ost->top_field_first;
1217 
1218  if (in_picture->interlaced_frame) {
1219  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1220  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1221  else
1222  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1223  } else
1224  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1225 
1226  in_picture->quality = enc->global_quality;
1227  in_picture->pict_type = 0;
1228 
1229  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1230  in_picture->pts * av_q2d(enc->time_base) : NAN;
1231  if (ost->forced_kf_index < ost->forced_kf_count &&
1232  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1233  ost->forced_kf_index++;
1234  forced_keyframe = 1;
1235  } else if (ost->forced_keyframes_pexpr) {
1236  double res;
1237  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1246  res);
1247  if (res) {
1248  forced_keyframe = 1;
1254  }
1255 
1257  } else if ( ost->forced_keyframes
1258  && !strncmp(ost->forced_keyframes, "source", 6)
1259  && in_picture->key_frame==1) {
1260  forced_keyframe = 1;
1261  }
1262 
1263  if (forced_keyframe) {
1264  in_picture->pict_type = AV_PICTURE_TYPE_I;
1265  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1266  }
1267 
1269  if (debug_ts) {
1270  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1271  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1272  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1273  enc->time_base.num, enc->time_base.den);
1274  }
1275 
1276  ost->frames_encoded++;
1277 
1278  ret = avcodec_send_frame(enc, in_picture);
1279  if (ret < 0)
1280  goto error;
1281 
1282  while (1) {
1283  ret = avcodec_receive_packet(enc, &pkt);
1284  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1285  if (ret == AVERROR(EAGAIN))
1286  break;
1287  if (ret < 0)
1288  goto error;
1289 
1290  if (debug_ts) {
1291  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1292  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1293  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1294  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1295  }
1296 
1297  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1298  pkt.pts = ost->sync_opts;
1299 
1300  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1301 
1302  if (debug_ts) {
1303  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1305  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1306  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1307  }
1308 
1309  frame_size = pkt.size;
1310  output_packet(of, &pkt, ost, 0);
1311 
1312  /* if two pass, output log */
1313  if (ost->logfile && enc->stats_out) {
1314  fprintf(ost->logfile, "%s", enc->stats_out);
1315  }
1316  }
1317  }
1318  ost->sync_opts++;
1319  /*
1320  * For video, number of frames in == number of packets out.
1321  * But there may be reordering, so we can't throw away frames on encoder
1322  * flush, we need to limit them here, before they go into encoder.
1323  */
1324  ost->frame_number++;
1325 
1326  if (vstats_filename && frame_size)
1327  do_video_stats(ost, frame_size);
1328  }
1329 
1330  if (!ost->last_frame)
1331  ost->last_frame = av_frame_alloc();
1332  av_frame_unref(ost->last_frame);
1333  if (next_picture && ost->last_frame)
1334  av_frame_ref(ost->last_frame, next_picture);
1335  else
1336  av_frame_free(&ost->last_frame);
1337 
1338  return;
1339 error:
1340  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1341  exit_program(1);
1342 }
1343 
1344 static double psnr(double d)
1345 {
1346  return -10.0 * log10(d);
1347 }
1348 
{
    /* Append one line of per-frame statistics for this video stream to the
     * file named by -vstats/-vstats_file.
     * NOTE(review): the function signature is on a line not visible in this
     * chunk; it is called as do_video_stats(ost, frame_size) elsewhere. */
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1); /* cannot continue without the requested stats file */
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* version 2 of the vstats line additionally records output file and stream indices */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid dividing by ~0 in avg_bitrate below */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1391 
1392 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1393 
{
    /* Mark streams as finished. With -shortest on the output file, finishing
     * one stream finishes every stream of that file.
     * NOTE(review): the function signature and the statement marking 'ost'
     * itself finished are on lines not visible in this chunk. */
    OutputFile *of = output_files[ost->file_index];
    int i;

    if (of->shortest) {
        /* -shortest: once any stream ends, stop encoding and muxing all of them */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1406 
1407 /**
1408  * Get and encode new output from any of the filtergraphs, without causing
1409  * activity.
1410  *
1411  * @return 0 for success, <0 for severe errors
1412  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter; /* NOTE(review): the declaration of 'filter' is on a line not visible in this chunk */

        /* lazily initialize the output stream the first time a frame is reaped */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        /* drain every frame currently available from this stream's buffersink */
        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NOTE(review): the continuation line carrying the flags argument
             * of this call is not visible in this chunk */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log(...) line opening this message is not visible in this chunk */
                    "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* on flush, push a NULL frame so the video path can emit trailing frames
                     * NOTE(review): a guarding condition line above this call is not visible in this chunk */
                    do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* stream already done: discard the frame and keep draining */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator to keep extra fractional precision in float_pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): the av_log(...) line opening this message is not visible in this chunk */
                    "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1518 
static void print_final_stats(int64_t total_size)
{
    /* Print the end-of-run summary: per-type byte totals, muxing overhead,
     * then verbose per-input and per-output stream statistics. */
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate per-media-type sizes over all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        /* NOTE(review): the condition guarding this assignment (detection of
         * two-pass encoding) is on lines not visible in this chunk */
            pass1_used = 0;
    }

    /* muxing overhead as a percentage of the payload size */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1635 
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    /* Print the periodic progress line (frame/fps/q/size/time/bitrate/speed)
     * to stderr and, when -progress is active, write machine-readable
     * key=value pairs to progress_avio. Throttled to one update per 500 ms
     * unless this is the final report. */
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        /* rate-limit intermediate reports to twice a second */
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the av_bprint_init(&buf, ...) line is not visible in this chunk */
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are subsampled: quarter the scale */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): a guarding condition line above this statement is not visible in this chunk */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): a guarding condition line above this statement is not visible in this chunk */
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* final report ends the line; intermediate reports overwrite it with \r */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): the av_log(...) line opening this message is not visible in this chunk */
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1832 
{
    /* Seed the input filter's media parameters from the demuxer-level codec
     * parameters when no decoded frame was ever produced.
     * NOTE(review): the function signature is on a line not visible in this
     * chunk; it is called as ifilter_parameters_from_codecpar(ifilter, par)
     * elsewhere in this file. */
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format                 = par->format;
    ifilter->sample_rate            = par->sample_rate;
    ifilter->channels               = par->channels;
    ifilter->channel_layout         = par->channel_layout;
    ifilter->width                  = par->width;
    ifilter->height                 = par->height;
    ifilter->sample_aspect_ratio    = par->sample_aspect_ratio;
}
1845 
static void flush_encoders(void)
{
    /* Drain every encoder at end of input: send a NULL (flush) frame and mux
     * all remaining packets until each encoder signals EOF. */
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log(...) line opening this message is not visible in this chunk */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* fall back to stream codec parameters for inputs whose format was never determined */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the condition guarding this 'continue' is on a line not visible in this chunk */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        /* NOTE(review): the condition guarding this 'continue' is on a line not visible in this chunk */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): one statement line here is not visible in this chunk */

            /* keep sending the flush frame while the encoder still wants input */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* if two pass, write the pass-log output */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* encoder fully drained: send the EOF packet downstream and stop */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            /* NOTE(review): the condition guarding this call is on a line not visible in this chunk */
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1961 
/*
 * Check whether a packet from ist should be written into ost at this time
 */
{
    /* NOTE(review): the function signature is on a line not visible in this
     * chunk; it takes the input stream 'ist' and output stream 'ost'. */
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    /* the packet must come from the stream this output is mapped to */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* drop packets that precede the output file's -ss start point */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1981 
{
    /* Stream-copy path: forward a demuxed packet to the muxer without
     * re-encoding, rescaling its timestamps into the output timebase and
     * honoring -ss / -t / -to style trimming.
     * NOTE(review): the function signature is on a line not visible in this
     * chunk; it references 'ist', 'ost' and the input packet 'pkt'. */
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt = { 0 };

    av_init_packet(&opkt);

    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* drop leading non-keyframes
     * NOTE(review): the continuation line of this condition is not visible in this chunk */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output file recording time reached: close the stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration of 'duration' is on a line not visible in this chunk */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        /* NOTE(review): one argument line of this call is not visible in this chunk */
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags    = pkt->flags;

    if (pkt->buf) {
        opkt.buf = av_buffer_ref(pkt->buf);
        if (!opkt.buf)
            exit_program(1);
    }
    opkt.data = pkt->data;
    opkt.size = pkt->size;

    av_copy_packet_side_data(&opkt, pkt);

    output_packet(of, &opkt, ost, 0);
}
2068 
{
    /* Try to fill in a missing channel layout for an input audio stream;
     * returns 1 when a layout is present/guessed, 0 when it cannot be.
     * NOTE(review): the function signature is on a line not visible in this
     * chunk; it takes the InputStream 'ist'. */
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* too many channels to guess reliably (user-settable limit) */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): the statement assigning a guessed layout is on a line
         * not visible in this chunk */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2088 
static void check_decode_result(InputStream *ist, int *got_output, int ret)
    /* Track decode success/failure statistics and abort on errors or corrupt
     * frames when -xerror is in effect.
     * NOTE(review): the opening brace line and one interior condition line
     * are not visible in this chunk, which is why the braces below do not
     * balance as shown. */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
        /* NOTE(review): an inner condition line (corrupt-frame check) is not visible in this chunk */
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
            exit_program(1);
        }
    }
}
2104 
// Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): the function signature is on a line not visible in this
 * chunk; it takes the FilterGraph 'fg' and returns an int boolean. */
{
    int i;
    /* an audio/video input with an undetermined format blocks configuration */
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2116 
{
    /* Feed a decoded frame into an input filter, (re)configuring the
     * filtergraph first if the frame's parameters changed or the graph was
     * never set up; frames arriving before all inputs have known formats are
     * buffered in the filter's FIFO.
     * NOTE(review): the function signature is on a line not visible in this
     * chunk; it references 'ifilter' and 'frame'. */
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* some inputs still have unknown formats: queue a private copy
                 * of the frame and try again later */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    /* grow the FIFO when full */
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* flush frames already buffered in the old graph before rebuilding it */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the statement assigning 'ret' here (pushing the frame
     * into the buffer source) is on a line not visible in this chunk */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2189 
2190 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2191 {
2192  int ret;
2193 
2194  ifilter->eof = 1;
2195 
2196  if (ifilter->filter) {
2197  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2198  if (ret < 0)
2199  return ret;
2200  } else {
2201  // the filtergraph was never configured
2202  if (ifilter->format < 0)
2203  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2204  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2205  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2206  return AVERROR_INVALIDDATA;
2207  }
2208  }
2209 
2210  return 0;
2211 }
2212 
2213 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2214 // There is the following difference: if you got a frame, you must call
2215 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2216 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2217 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2218 {
2219  int ret;
2220 
2221  *got_frame = 0;
2222 
2223  if (pkt) {
2224  ret = avcodec_send_packet(avctx, pkt);
2225  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2226  // decoded frames with avcodec_receive_frame() until done.
2227  if (ret < 0 && ret != AVERROR_EOF)
2228  return ret;
2229  }
2230 
2231  ret = avcodec_receive_frame(avctx, frame);
2232  if (ret < 0 && ret != AVERROR(EAGAIN))
2233  return ret;
2234  if (ret >= 0)
2235  *got_frame = 1;
2236 
2237  return 0;
2238 }
2239 
2240 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2241 {
2242  int i, ret;
2243  AVFrame *f;
2244 
2245  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2246  for (i = 0; i < ist->nb_filters; i++) {
2247  if (i < ist->nb_filters - 1) {
2248  f = ist->filter_frame;
2249  ret = av_frame_ref(f, decoded_frame);
2250  if (ret < 0)
2251  break;
2252  } else
2253  f = decoded_frame;
2254  ret = ifilter_send_frame(ist->filters[i], f);
2255  if (ret == AVERROR_EOF)
2256  ret = 0; /* ignore */
2257  if (ret < 0) {
2259  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2260  break;
2261  }
2262  }
2263  return ret;
2264 }
2265 
2266 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2267  int *decode_failed)
2268 {
2269  AVFrame *decoded_frame;
2270  AVCodecContext *avctx = ist->dec_ctx;
2271  int ret, err = 0;
2272  AVRational decoded_frame_tb;
2273 
2274  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2275  return AVERROR(ENOMEM);
2276  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2277  return AVERROR(ENOMEM);
2278  decoded_frame = ist->decoded_frame;
2279 
2281  ret = decode(avctx, decoded_frame, got_output, pkt);
2282  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2283  if (ret < 0)
2284  *decode_failed = 1;
2285 
2286  if (ret >= 0 && avctx->sample_rate <= 0) {
2287  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2288  ret = AVERROR_INVALIDDATA;
2289  }
2290 
2291  if (ret != AVERROR_EOF)
2292  check_decode_result(ist, got_output, ret);
2293 
2294  if (!*got_output || ret < 0)
2295  return ret;
2296 
2297  ist->samples_decoded += decoded_frame->nb_samples;
2298  ist->frames_decoded++;
2299 
2300 #if 1
2301  /* increment next_dts to use for the case where the input stream does not
2302  have timestamps or there are multiple frames in the packet */
2303  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2304  avctx->sample_rate;
2305  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2306  avctx->sample_rate;
2307 #endif
2308 
2309  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2310  decoded_frame_tb = ist->st->time_base;
2311  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2312  decoded_frame->pts = pkt->pts;
2313  decoded_frame_tb = ist->st->time_base;
2314  }else {
2315  decoded_frame->pts = ist->dts;
2316  decoded_frame_tb = AV_TIME_BASE_Q;
2317  }
2318  if (decoded_frame->pts != AV_NOPTS_VALUE)
2319  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2320  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2321  (AVRational){1, avctx->sample_rate});
2322  ist->nb_samples = decoded_frame->nb_samples;
2323  err = send_frame_to_filters(ist, decoded_frame);
2324 
2325  av_frame_unref(ist->filter_frame);
2326  av_frame_unref(decoded_frame);
2327  return err < 0 ? err : ret;
2328 }
2329 
2330 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2331  int *decode_failed)
2332 {
2333  AVFrame *decoded_frame;
2334  int i, ret = 0, err = 0;
2335  int64_t best_effort_timestamp;
2336  int64_t dts = AV_NOPTS_VALUE;
2337  AVPacket avpkt;
2338 
2339  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2340  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2341  // skip the packet.
2342  if (!eof && pkt && pkt->size == 0)
2343  return 0;
2344 
2345  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2346  return AVERROR(ENOMEM);
2347  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2348  return AVERROR(ENOMEM);
2349  decoded_frame = ist->decoded_frame;
2350  if (ist->dts != AV_NOPTS_VALUE)
2351  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2352  if (pkt) {
2353  avpkt = *pkt;
2354  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2355  }
2356 
2357  // The old code used to set dts on the drain packet, which does not work
2358  // with the new API anymore.
2359  if (eof) {
2360  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2361  if (!new)
2362  return AVERROR(ENOMEM);
2363  ist->dts_buffer = new;
2364  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2365  }
2366 
2368  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2369  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2370  if (ret < 0)
2371  *decode_failed = 1;
2372 
2373  // The following line may be required in some cases where there is no parser
2374  // or the parser does not has_b_frames correctly
2375  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2376  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2377  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2378  } else
2380  "video_delay is larger in decoder than demuxer %d > %d.\n"
2381  "If you want to help, upload a sample "
2382  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2383  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2384  ist->dec_ctx->has_b_frames,
2385  ist->st->codecpar->video_delay);
2386  }
2387 
2388  if (ret != AVERROR_EOF)
2389  check_decode_result(ist, got_output, ret);
2390 
2391  if (*got_output && ret >= 0) {
2392  if (ist->dec_ctx->width != decoded_frame->width ||
2393  ist->dec_ctx->height != decoded_frame->height ||
2394  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2395  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2396  decoded_frame->width,
2397  decoded_frame->height,
2398  decoded_frame->format,
2399  ist->dec_ctx->width,
2400  ist->dec_ctx->height,
2401  ist->dec_ctx->pix_fmt);
2402  }
2403  }
2404 
2405  if (!*got_output || ret < 0)
2406  return ret;
2407 
2408  if(ist->top_field_first>=0)
2409  decoded_frame->top_field_first = ist->top_field_first;
2410 
2411  ist->frames_decoded++;
2412 
2413  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2414  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2415  if (err < 0)
2416  goto fail;
2417  }
2418  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2419 
2420  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2421  *duration_pts = decoded_frame->pkt_duration;
2422 
2423  if (ist->framerate.num)
2424  best_effort_timestamp = ist->cfr_next_pts++;
2425 
2426  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2427  best_effort_timestamp = ist->dts_buffer[0];
2428 
2429  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2430  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2431  ist->nb_dts_buffer--;
2432  }
2433 
2434  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2435  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2436 
2437  if (ts != AV_NOPTS_VALUE)
2438  ist->next_pts = ist->pts = ts;
2439  }
2440 
2441  if (debug_ts) {
2442  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2443  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2444  ist->st->index, av_ts2str(decoded_frame->pts),
2445  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2446  best_effort_timestamp,
2447  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2448  decoded_frame->key_frame, decoded_frame->pict_type,
2449  ist->st->time_base.num, ist->st->time_base.den);
2450  }
2451 
2452  if (ist->st->sample_aspect_ratio.num)
2453  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2454 
2455  err = send_frame_to_filters(ist, decoded_frame);
2456 
2457 fail:
2459  av_frame_unref(decoded_frame);
2460  return err < 0 ? err : ret;
2461 }
2462 
2463 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2464  int *decode_failed)
2465 {
2466  AVSubtitle subtitle;
2467  int free_sub = 1;
2468  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2469  &subtitle, got_output, pkt);
2470 
2471  check_decode_result(NULL, got_output, ret);
2472 
2473  if (ret < 0 || !*got_output) {
2474  *decode_failed = 1;
2475  if (!pkt->size)
2476  sub2video_flush(ist);
2477  return ret;
2478  }
2479 
2480  if (ist->fix_sub_duration) {
2481  int end = 1;
2482  if (ist->prev_sub.got_output) {
2483  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2484  1000, AV_TIME_BASE);
2485  if (end < ist->prev_sub.subtitle.end_display_time) {
2486  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2487  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2489  end <= 0 ? ", dropping it" : "");
2491  }
2492  }
2493  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2494  FFSWAP(int, ret, ist->prev_sub.ret);
2495  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2496  if (end <= 0)
2497  goto out;
2498  }
2499 
2500  if (!*got_output)
2501  return ret;
2502 
2503  if (ist->sub2video.frame) {
2504  sub2video_update(ist, &subtitle);
2505  } else if (ist->nb_filters) {
2506  if (!ist->sub2video.sub_queue)
2507  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2508  if (!ist->sub2video.sub_queue)
2509  exit_program(1);
2510  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2512  if (ret < 0)
2513  exit_program(1);
2514  }
2515  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2516  free_sub = 0;
2517  }
2518 
2519  if (!subtitle.num_rects)
2520  goto out;
2521 
2522  ist->frames_decoded++;
2523 
2524  for (i = 0; i < nb_output_streams; i++) {
2525  OutputStream *ost = output_streams[i];
2526 
2527  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2528  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2529  continue;
2530 
2531  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2532  }
2533 
2534 out:
2535  if (free_sub)
2536  avsubtitle_free(&subtitle);
2537  return ret;
2538 }
2539 
2541 {
2542  int i, ret;
2543  /* TODO keep pts also in stream time base to avoid converting back */
2544  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2546 
2547  for (i = 0; i < ist->nb_filters; i++) {
2548  ret = ifilter_send_eof(ist->filters[i], pts);
2549  if (ret < 0)
2550  return ret;
2551  }
2552  return 0;
2553 }
2554 
2555 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2556 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2557 {
2558  int ret = 0, i;
2559  int repeating = 0;
2560  int eof_reached = 0;
2561 
2562  AVPacket avpkt;
2563  if (!ist->saw_first_ts) {
2564  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2565  ist->pts = 0;
2566  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2567  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2568  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2569  }
2570  ist->saw_first_ts = 1;
2571  }
2572 
2573  if (ist->next_dts == AV_NOPTS_VALUE)
2574  ist->next_dts = ist->dts;
2575  if (ist->next_pts == AV_NOPTS_VALUE)
2576  ist->next_pts = ist->pts;
2577 
2578  if (!pkt) {
2579  /* EOF handling */
2580  av_init_packet(&avpkt);
2581  avpkt.data = NULL;
2582  avpkt.size = 0;
2583  } else {
2584  avpkt = *pkt;
2585  }
2586 
2587  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2588  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2589  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2590  ist->next_pts = ist->pts = ist->dts;
2591  }
2592 
2593  // while we have more to decode or while the decoder did output something on EOF
2594  while (ist->decoding_needed) {
2595  int64_t duration_dts = 0;
2596  int64_t duration_pts = 0;
2597  int got_output = 0;
2598  int decode_failed = 0;
2599 
2600  ist->pts = ist->next_pts;
2601  ist->dts = ist->next_dts;
2602 
2603  switch (ist->dec_ctx->codec_type) {
2604  case AVMEDIA_TYPE_AUDIO:
2605  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2606  &decode_failed);
2607  break;
2608  case AVMEDIA_TYPE_VIDEO:
2609  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2610  &decode_failed);
2611  if (!repeating || !pkt || got_output) {
2612  if (pkt && pkt->duration) {
2613  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2614  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2616  duration_dts = ((int64_t)AV_TIME_BASE *
2617  ist->dec_ctx->framerate.den * ticks) /
2619  }
2620 
2621  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2622  ist->next_dts += duration_dts;
2623  }else
2624  ist->next_dts = AV_NOPTS_VALUE;
2625  }
2626 
2627  if (got_output) {
2628  if (duration_pts > 0) {
2629  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2630  } else {
2631  ist->next_pts += duration_dts;
2632  }
2633  }
2634  break;
2635  case AVMEDIA_TYPE_SUBTITLE:
2636  if (repeating)
2637  break;
2638  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2639  if (!pkt && ret >= 0)
2640  ret = AVERROR_EOF;
2641  break;
2642  default:
2643  return -1;
2644  }
2645 
2646  if (ret == AVERROR_EOF) {
2647  eof_reached = 1;
2648  break;
2649  }
2650 
2651  if (ret < 0) {
2652  if (decode_failed) {
2653  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2654  ist->file_index, ist->st->index, av_err2str(ret));
2655  } else {
2656  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2657  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2658  }
2659  if (!decode_failed || exit_on_error)
2660  exit_program(1);
2661  break;
2662  }
2663 
2664  if (got_output)
2665  ist->got_output = 1;
2666 
2667  if (!got_output)
2668  break;
2669 
2670  // During draining, we might get multiple output frames in this loop.
2671  // ffmpeg.c does not drain the filter chain on configuration changes,
2672  // which means if we send multiple frames at once to the filters, and
2673  // one of those frames changes configuration, the buffered frames will
2674  // be lost. This can upset certain FATE tests.
2675  // Decode only 1 frame per call on EOF to appease these FATE tests.
2676  // The ideal solution would be to rewrite decoding to use the new
2677  // decoding API in a better way.
2678  if (!pkt)
2679  break;
2680 
2681  repeating = 1;
2682  }
2683 
2684  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2685  /* except when looping we need to flush but not to send an EOF */
2686  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2687  int ret = send_filter_eof(ist);
2688  if (ret < 0) {
2689  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2690  exit_program(1);
2691  }
2692  }
2693 
2694  /* handle stream copy */
2695  if (!ist->decoding_needed && pkt) {
2696  ist->dts = ist->next_dts;
2697  switch (ist->dec_ctx->codec_type) {
2698  case AVMEDIA_TYPE_AUDIO:
2699  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2700  ist->dec_ctx->sample_rate;
2701  break;
2702  case AVMEDIA_TYPE_VIDEO:
2703  if (ist->framerate.num) {
2704  // TODO: Remove work-around for c99-to-c89 issue 7
2705  AVRational time_base_q = AV_TIME_BASE_Q;
2706  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2707  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2708  } else if (pkt->duration) {
2709  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2710  } else if(ist->dec_ctx->framerate.num != 0) {
2711  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2712  ist->next_dts += ((int64_t)AV_TIME_BASE *
2713  ist->dec_ctx->framerate.den * ticks) /
2715  }
2716  break;
2717  }
2718  ist->pts = ist->dts;
2719  ist->next_pts = ist->next_dts;
2720  }
2721  for (i = 0; i < nb_output_streams; i++) {
2722  OutputStream *ost = output_streams[i];
2723 
2724  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2725  continue;
2726 
2727  do_streamcopy(ist, ost, pkt);
2728  }
2729 
2730  return !eof_reached;
2731 }
2732 
2733 static void print_sdp(void)
2734 {
2735  char sdp[16384];
2736  int i;
2737  int j;
2738  AVIOContext *sdp_pb;
2739  AVFormatContext **avc;
2740 
2741  for (i = 0; i < nb_output_files; i++) {
2742  if (!output_files[i]->header_written)
2743  return;
2744  }
2745 
2746  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2747  if (!avc)
2748  exit_program(1);
2749  for (i = 0, j = 0; i < nb_output_files; i++) {
2750  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2751  avc[j] = output_files[i]->ctx;
2752  j++;
2753  }
2754  }
2755 
2756  if (!j)
2757  goto fail;
2758 
2759  av_sdp_create(avc, j, sdp, sizeof(sdp));
2760 
2761  if (!sdp_filename) {
2762  printf("SDP:\n%s\n", sdp);
2763  fflush(stdout);
2764  } else {
2765  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2766  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2767  } else {
2768  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2769  avio_closep(&sdp_pb);
2771  }
2772  }
2773 
2774 fail:
2775  av_freep(&avc);
2776 }
2777 
2779 {
2780  InputStream *ist = s->opaque;
2781  const enum AVPixelFormat *p;
2782  int ret;
2783 
2784  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2786  const AVCodecHWConfig *config = NULL;
2787  int i;
2788 
2789  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2790  break;
2791 
2792  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2793  ist->hwaccel_id == HWACCEL_AUTO) {
2794  for (i = 0;; i++) {
2795  config = avcodec_get_hw_config(s->codec, i);
2796  if (!config)
2797  break;
2798  if (!(config->methods &
2800  continue;
2801  if (config->pix_fmt == *p)
2802  break;
2803  }
2804  }
2805  if (config) {
2806  if (config->device_type != ist->hwaccel_device_type) {
2807  // Different hwaccel offered, ignore.
2808  continue;
2809  }
2810 
2811  ret = hwaccel_decode_init(s);
2812  if (ret < 0) {
2813  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2815  "%s hwaccel requested for input stream #%d:%d, "
2816  "but cannot be initialized.\n",
2818  ist->file_index, ist->st->index);
2819  return AV_PIX_FMT_NONE;
2820  }
2821  continue;
2822  }
2823  } else {
2824  const HWAccel *hwaccel = NULL;
2825  int i;
2826  for (i = 0; hwaccels[i].name; i++) {
2827  if (hwaccels[i].pix_fmt == *p) {
2828  hwaccel = &hwaccels[i];
2829  break;
2830  }
2831  }
2832  if (!hwaccel) {
2833  // No hwaccel supporting this pixfmt.
2834  continue;
2835  }
2836  if (hwaccel->id != ist->hwaccel_id) {
2837  // Does not match requested hwaccel.
2838  continue;
2839  }
2840 
2841  ret = hwaccel->init(s);
2842  if (ret < 0) {
2844  "%s hwaccel requested for input stream #%d:%d, "
2845  "but cannot be initialized.\n", hwaccel->name,
2846  ist->file_index, ist->st->index);
2847  return AV_PIX_FMT_NONE;
2848  }
2849  }
2850 
2851  if (ist->hw_frames_ctx) {
2853  if (!s->hw_frames_ctx)
2854  return AV_PIX_FMT_NONE;
2855  }
2856 
2857  ist->hwaccel_pix_fmt = *p;
2858  break;
2859  }
2860 
2861  return *p;
2862 }
2863 
2865 {
2866  InputStream *ist = s->opaque;
2867 
2868  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2869  return ist->hwaccel_get_buffer(s, frame, flags);
2870 
2871  return avcodec_default_get_buffer2(s, frame, flags);
2872 }
2873 
2874 static int init_input_stream(int ist_index, char *error, int error_len)
2875 {
2876  int ret;
2877  InputStream *ist = input_streams[ist_index];
2878 
2879  if (ist->decoding_needed) {
2880  AVCodec *codec = ist->dec;
2881  if (!codec) {
2882  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2883  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2884  return AVERROR(EINVAL);
2885  }
2886 
2887  ist->dec_ctx->opaque = ist;
2888  ist->dec_ctx->get_format = get_format;
2889  ist->dec_ctx->get_buffer2 = get_buffer;
2890  ist->dec_ctx->thread_safe_callbacks = 1;
2891 
2892  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2893  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2894  (ist->decoding_needed & DECODING_FOR_OST)) {
2895  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2897  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2898  }
2899 
2900  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2901 
2902  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2903  * audio, and video decoders such as cuvid or mediacodec */
2904  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2905 
2906  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2907  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2908  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2910  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2911 
2912  ret = hw_device_setup_for_decode(ist);
2913  if (ret < 0) {
2914  snprintf(error, error_len, "Device setup failed for "
2915  "decoder on input stream #%d:%d : %s",
2916  ist->file_index, ist->st->index, av_err2str(ret));
2917  return ret;
2918  }
2919 
2920  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2921  if (ret == AVERROR_EXPERIMENTAL)
2922  abort_codec_experimental(codec, 0);
2923 
2924  snprintf(error, error_len,
2925  "Error while opening decoder for input stream "
2926  "#%d:%d : %s",
2927  ist->file_index, ist->st->index, av_err2str(ret));
2928  return ret;
2929  }
2931  }
2932 
2933  ist->next_pts = AV_NOPTS_VALUE;
2934  ist->next_dts = AV_NOPTS_VALUE;
2935 
2936  return 0;
2937 }
2938 
2940 {
2941  if (ost->source_index >= 0)
2942  return input_streams[ost->source_index];
2943  return NULL;
2944 }
2945 
/* qsort() comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    /* same as FFDIFFSIGN(): yields -1, 0 or +1 without overflow risk */
    return (lhs > rhs) - (lhs < rhs);
}
2950 
2951 /* open the muxer when all the streams are initialized */
2952 static int check_init_output_file(OutputFile *of, int file_index)
2953 {
2954  int ret, i;
2955 
2956  for (i = 0; i < of->ctx->nb_streams; i++) {
2957  OutputStream *ost = output_streams[of->ost_index + i];
2958  if (!ost->initialized)
2959  return 0;
2960  }
2961 
2962  of->ctx->interrupt_callback = int_cb;
2963 
2964  ret = avformat_write_header(of->ctx, &of->opts);
2965  if (ret < 0) {
2967  "Could not write header for output file #%d "
2968  "(incorrect codec parameters ?): %s\n",
2969  file_index, av_err2str(ret));
2970  return ret;
2971  }
2972  //assert_avoptions(of->opts);
2973  of->header_written = 1;
2974 
2975  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2976 
2977  if (sdp_filename || want_sdp)
2978  print_sdp();
2979 
2980  /* flush the muxing queues */
2981  for (i = 0; i < of->ctx->nb_streams; i++) {
2982  OutputStream *ost = output_streams[of->ost_index + i];
2983 
2984  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2985  if (!av_fifo_size(ost->muxing_queue))
2986  ost->mux_timebase = ost->st->time_base;
2987 
2988  while (av_fifo_size(ost->muxing_queue)) {
2989  AVPacket pkt;
2990  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2991  write_packet(of, &pkt, ost, 1);
2992  }
2993  }
2994 
2995  return 0;
2996 }
2997 
2999 {
3000  AVBSFContext *ctx;
3001  int i, ret;
3002 
3003  if (!ost->nb_bitstream_filters)
3004  return 0;
3005 
3006  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3007  ctx = ost->bsf_ctx[i];
3008 
3009  ret = avcodec_parameters_copy(ctx->par_in,
3010  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3011  if (ret < 0)
3012  return ret;
3013 
3014  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3015 
3016  ret = av_bsf_init(ctx);
3017  if (ret < 0) {
3018  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3019  ost->bsf_ctx[i]->filter->name);
3020  return ret;
3021  }
3022  }
3023 
3024  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3025  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3026  if (ret < 0)
3027  return ret;
3028 
3029  ost->st->time_base = ctx->time_base_out;
3030 
3031  return 0;
3032 }
3033 
3035 {
3036  OutputFile *of = output_files[ost->file_index];
3037  InputStream *ist = get_input_stream(ost);
3038  AVCodecParameters *par_dst = ost->st->codecpar;
3039  AVCodecParameters *par_src = ost->ref_par;
3040  AVRational sar;
3041  int i, ret;
3042  uint32_t codec_tag = par_dst->codec_tag;
3043 
3044  av_assert0(ist && !ost->filter);
3045 
3046  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3047  if (ret >= 0)
3048  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3049  if (ret < 0) {
3051  "Error setting up codec context options.\n");
3052  return ret;
3053  }
3055 
3056  if (!codec_tag) {
3057  unsigned int codec_tag_tmp;
3058  if (!of->ctx->oformat->codec_tag ||
3059  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3060  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3061  codec_tag = par_src->codec_tag;
3062  }
3063 
3064  ret = avcodec_parameters_copy(par_dst, par_src);
3065  if (ret < 0)
3066  return ret;
3067 
3068  par_dst->codec_tag = codec_tag;
3069 
3070  if (!ost->frame_rate.num)
3071  ost->frame_rate = ist->framerate;
3072  ost->st->avg_frame_rate = ost->frame_rate;
3073 
3075  if (ret < 0)
3076  return ret;
3077 
3078  // copy timebase while removing common factors
3079  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3081 
3082  // copy estimated duration as a hint to the muxer
3083  if (ost->st->duration <= 0 && ist->st->duration > 0)
3084  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3085 
3086  // copy disposition
3087  ost->st->disposition = ist->st->disposition;
3088 
3089  if (ist->st->nb_side_data) {
3090  for (i = 0; i < ist->st->nb_side_data; i++) {
3091  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3092  uint8_t *dst_data;
3093 
3094  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3095  if (!dst_data)
3096  return AVERROR(ENOMEM);
3097  memcpy(dst_data, sd_src->data, sd_src->size);
3098  }
3099  }
3100 
3101  if (ost->rotate_overridden) {
3103  sizeof(int32_t) * 9);
3104  if (sd)
3105  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3106  }
3107 
3108  switch (par_dst->codec_type) {
3109  case AVMEDIA_TYPE_AUDIO:
3110  if (audio_volume != 256) {
3111  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3112  exit_program(1);
3113  }
3114  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3115  par_dst->block_align= 0;
3116  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3117  par_dst->block_align= 0;
3118  break;
3119  case AVMEDIA_TYPE_VIDEO:
3120  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3121  sar =
3122  av_mul_q(ost->frame_aspect_ratio,
3123  (AVRational){ par_dst->height, par_dst->width });
3124  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3125  "with stream copy may produce invalid files\n");
3126  }
3127  else if (ist->st->sample_aspect_ratio.num)
3128  sar = ist->st->sample_aspect_ratio;
3129  else
3130  sar = par_src->sample_aspect_ratio;
3131  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3132  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3133  ost->st->r_frame_rate = ist->st->r_frame_rate;
3134  break;
3135  }
3136 
3137  ost->mux_timebase = ist->st->time_base;
3138 
3139  return 0;
3140 }
3141 
3143 {
3144  AVDictionaryEntry *e;
3145 
3146  uint8_t *encoder_string;
3147  int encoder_string_len;
3148  int format_flags = 0;
3149  int codec_flags = ost->enc_ctx->flags;
3150 
3151  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3152  return;
3153 
3154  e = av_dict_get(of->opts, "fflags", NULL, 0);
3155  if (e) {
3156  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3157  if (!o)
3158  return;
3159  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3160  }
3161  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3162  if (e) {
3163  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3164  if (!o)
3165  return;
3166  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3167  }
3168 
3169  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3170  encoder_string = av_mallocz(encoder_string_len);
3171  if (!encoder_string)
3172  exit_program(1);
3173 
3174  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3175  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3176  else
3177  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3178  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3179  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3181 }
3182 
3184  AVCodecContext *avctx)
3185 {
3186  char *p;
3187  int n = 1, i, size, index = 0;
3188  int64_t t, *pts;
3189 
3190  for (p = kf; *p; p++)
3191  if (*p == ',')
3192  n++;
3193  size = n;
3194  pts = av_malloc_array(size, sizeof(*pts));
3195  if (!pts) {
3196  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3197  exit_program(1);
3198  }
3199 
3200  p = kf;
3201  for (i = 0; i < n; i++) {
3202  char *next = strchr(p, ',');
3203 
3204  if (next)
3205  *next++ = 0;
3206 
3207  if (!memcmp(p, "chapters", 8)) {
3208 
3209  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3210  int j;
3211 
3212  if (avf->nb_chapters > INT_MAX - size ||
3213  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3214  sizeof(*pts)))) {
3216  "Could not allocate forced key frames array.\n");
3217  exit_program(1);
3218  }
3219  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3220  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3221 
3222  for (j = 0; j < avf->nb_chapters; j++) {
3223  AVChapter *c = avf->chapters[j];
3224  av_assert1(index < size);
3225  pts[index++] = av_rescale_q(c->start, c->time_base,
3226  avctx->time_base) + t;
3227  }
3228 
3229  } else {
3230 
3231  t = parse_time_or_die("force_key_frames", p, 1);
3232  av_assert1(index < size);
3233  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3234 
3235  }
3236 
3237  p = next;
3238  }
3239 
3240  av_assert0(index == size);
3241  qsort(pts, size, sizeof(*pts), compare_int64);
3242  ost->forced_kf_count = size;
3243  ost->forced_kf_pts = pts;
3244 }
3245 
3246 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3247 {
3248  InputStream *ist = get_input_stream(ost);
3249  AVCodecContext *enc_ctx = ost->enc_ctx;
3250  AVFormatContext *oc;
3251 
3252  if (ost->enc_timebase.num > 0) {
3253  enc_ctx->time_base = ost->enc_timebase;
3254  return;
3255  }
3256 
3257  if (ost->enc_timebase.num < 0) {
3258  if (ist) {
3259  enc_ctx->time_base = ist->st->time_base;
3260  return;
3261  }
3262 
3263  oc = output_files[ost->file_index]->ctx;
3264  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3265  }
3266 
3267  enc_ctx->time_base = default_time_base;
3268 }
3269 
/* init_output_stream_encode: configure ost->enc_ctx (frame rate, timebase,
 * dimensions, pixel/sample format, forced keyframes, ...) from the input
 * stream and/or filtergraph before the encoder is opened.
 * NOTE(review): this is a scraped doxygen listing — the leading integers are
 * the original source line numbers, and several original lines (e.g. 3270,
 * 3274, 3299-3300, 3305, 3312, 3333, 3337-3339, 3345, 3348, 3350, 3356,
 * 3364, 3379, 3384-3387, 3391-3394, 3399) are missing from this capture, so
 * some statements below are truncated mid-expression. Code left byte-identical;
 * only comments added. */
3271 {
3272  InputStream *ist = get_input_stream(ost);
3273  AVCodecContext *enc_ctx = ost->enc_ctx;
3275  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3276  int j, ret;
3277 
3278  set_encoder_id(output_files[ost->file_index], ost);
3279 
3280  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3281  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3282  // which have to be filtered out to prevent leaking them to output files.
3283  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3284 
/* Inherit disposition / chroma location from the input stream when one
 * exists; otherwise scan sibling streams of the same type (the action taken
 * when none is found is on lines missing from this capture). */
3285  if (ist) {
3286  ost->st->disposition = ist->st->disposition;
3287 
3288  dec_ctx = ist->dec_ctx;
3289 
3290  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3291  } else {
3292  for (j = 0; j < oc->nb_streams; j++) {
3293  AVStream *st = oc->streams[j];
3294  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3295  break;
3296  }
3297  if (j == oc->nb_streams)
3298  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3301  }
3302 
/* Frame-rate selection for video: -r option, then input -framerate, then the
 * input's r_frame_rate, finally a warned-about 25 fps fallback; then clamp to
 * the encoder's supported rates and the MPEG-4 16-bit limit. */
3303  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3304  if (!ost->frame_rate.num)
3306  if (ist && !ost->frame_rate.num)
3307  ost->frame_rate = ist->framerate;
3308  if (ist && !ost->frame_rate.num)
3309  ost->frame_rate = ist->st->r_frame_rate;
3310  if (ist && !ost->frame_rate.num) {
3311  ost->frame_rate = (AVRational){25, 1};
3313  "No information "
3314  "about the input framerate is available. Falling "
3315  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3316  "if you want a different framerate.\n",
3317  ost->file_index, ost->index);
3318  }
3319 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3320  if (ost->enc->supported_framerates && !ost->force_fps) {
3321  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3322  ost->frame_rate = ost->enc->supported_framerates[idx];
3323  }
3324  // reduce frame rate for mpeg4 to be within the spec limits
3325  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3326  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3327  ost->frame_rate.num, ost->frame_rate.den, 65535);
3328  }
3329  }
3330 
/* Per-media-type encoder context setup. */
3331  switch (enc_ctx->codec_type) {
3332  case AVMEDIA_TYPE_AUDIO:
3334  if (dec_ctx)
3335  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3336  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3340 
3341  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3342  break;
3343 
3344  case AVMEDIA_TYPE_VIDEO:
3346 
3347  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3349  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3351  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3352  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3353  }
/* Rescale user-specified forced keyframe timestamps into the (possibly just
 * changed) encoder timebase. */
3354  for (j = 0; j < ost->forced_kf_count; j++)
3355  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3357  enc_ctx->time_base);
3358 
3359  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3360  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3361  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3362  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3363  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3365 
3366  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3367  if (dec_ctx)
3368  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3369  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3370 
3371  enc_ctx->framerate = ost->frame_rate;
3372 
3373  ost->st->avg_frame_rate = ost->frame_rate;
3374 
3375  if (!dec_ctx ||
3376  enc_ctx->width != dec_ctx->width ||
3377  enc_ctx->height != dec_ctx->height ||
3378  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3380  }
3381 
3382  if (ost->forced_keyframes) {
3383  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3386  if (ret < 0) {
3388  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3389  return ret;
3390  }
3395 
3396  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3397  // parse it only for static kf timings
3398  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3400  }
3401  }
3402  break;
3403  case AVMEDIA_TYPE_SUBTITLE:
3404  enc_ctx->time_base = AV_TIME_BASE_Q;
3405  if (!enc_ctx->width) {
3406  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3407  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3408  }
3409  break;
3410  case AVMEDIA_TYPE_DATA:
3411  break;
3412  default:
3413  abort();
3414  break;
3415  }
3416 
3417  ost->mux_timebase = enc_ctx->time_base;
3418 
3419  return 0;
3420 }
3421 
/* init_output_stream: fully initialize one output stream — for encoded
 * streams this opens the encoder and propagates side data / timebase /
 * duration to the muxer stream; for streamcopy it defers to the (missing in
 * this capture) copy path. Finally applies the user -disposition value and
 * sets up bitstream filters.
 * NOTE(review): scraped doxygen listing; leading integers are the original
 * source line numbers, and lines 3439, 3443, 3454-3456, 3480, 3482, 3488,
 * 3490 and 3545 are missing from this capture, leaving some statements
 * truncated. Code left byte-identical; only comments added. */
3422 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3423 {
3424  int ret = 0;
3425 
3426  if (ost->encoding_needed) {
3427  AVCodec *codec = ost->enc;
3428  AVCodecContext *dec = NULL;
3429  InputStream *ist;
3430 
3431  ret = init_output_stream_encode(ost);
3432  if (ret < 0)
3433  return ret;
3434 
3435  if ((ist = get_input_stream(ost)))
3436  dec = ist->dec_ctx;
3437  if (dec && dec->subtitle_header) {
3438  /* ASS code assumes this buffer is null terminated so add extra byte. */
3440  if (!ost->enc_ctx->subtitle_header)
3441  return AVERROR(ENOMEM);
3442  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3444  }
3445  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3446  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the codec declares no defaults and the
 * user gave neither -b nor -ab. */
3447  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3448  !codec->defaults &&
3449  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3450  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3451  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3452 
3453  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3457  if (!ost->enc_ctx->hw_frames_ctx)
3458  return AVERROR(ENOMEM);
3459  } else {
3460  ret = hw_device_setup_for_encode(ost);
3461  if (ret < 0) {
3462  snprintf(error, error_len, "Device setup failed for "
3463  "encoder on output stream #%d:%d : %s",
3464  ost->file_index, ost->index, av_err2str(ret));
3465  return ret;
3466  }
3467  }
3468 
3469  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3470  if (ret == AVERROR_EXPERIMENTAL)
3471  abort_codec_experimental(codec, 1);
3472  snprintf(error, error_len,
3473  "Error while opening encoder for output stream #%d:%d - "
3474  "maybe incorrect parameters such as bit_rate, rate, width or height",
3475  ost->file_index, ost->index);
3476  return ret;
3477  }
3478  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3479  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3481  ost->enc_ctx->frame_size);
3483  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3484  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3485  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3486  " It takes bits/s as argument, not kbits/s\n");
3487 
3489  if (ret < 0) {
3491  "Error initializing the output stream codec context.\n");
3492  exit_program(1);
3493  }
3494  /*
3495  * FIXME: ost->st->codec should't be needed here anymore.
3496  */
/* NOTE(review): avcodec_copy_context / ost->st->codec are legacy API use,
 * already flagged FIXME upstream. */
3497  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3498  if (ret < 0)
3499  return ret;
3500 
/* Propagate encoder-produced global side data to the muxer stream. */
3501  if (ost->enc_ctx->nb_coded_side_data) {
3502  int i;
3503 
3504  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3505  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3506  uint8_t *dst_data;
3507 
3508  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3509  if (!dst_data)
3510  return AVERROR(ENOMEM);
3511  memcpy(dst_data, sd_src->data, sd_src->size);
3512  }
3513  }
3514 
3515  /*
3516  * Add global input side data. For now this is naive, and copies it
3517  * from the input stream's global side data. All side data should
3518  * really be funneled over AVFrame and libavfilter, then added back to
3519  * packet side data, and then potentially using the first packet for
3520  * global side data.
3521  */
3522  if (ist) {
3523  int i;
3524  for (i = 0; i < ist->st->nb_side_data; i++) {
3525  AVPacketSideData *sd = &ist->st->side_data[i];
3526  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3527  if (!dst)
3528  return AVERROR(ENOMEM);
3529  memcpy(dst, sd->data, sd->size);
/* autorotate has already been applied by the filterchain, so neutralize the
 * display matrix rotation for the output. */
3530  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3531  av_display_rotation_set((uint32_t *)dst, 0);
3532  }
3533  }
3534 
3535  // copy timebase while removing common factors
3536  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3537  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3538 
3539  // copy estimated duration as a hint to the muxer
3540  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3541  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3542 
3543  ost->st->codec->codec= ost->enc_ctx->codec;
3544  } else if (ost->stream_copy) {
3546  if (ret < 0)
3547  return ret;
3548  }
3549 
3550  // parse user provided disposition, and update stream values
3551  if (ost->disposition) {
3552  static const AVOption opts[] = {
3553  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3554  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3555  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3556  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3557  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3558  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3559  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3560  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3561  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3562  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3563  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3564  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3565  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3566  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3567  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3568  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3569  { NULL },
3570  };
3571  static const AVClass class = {
3572  .class_name = "",
3573  .item_name = av_default_item_name,
3574  .option = opts,
3575  .version = LIBAVUTIL_VERSION_INT,
3576  };
3577  const AVClass *pclass = &class;
3578 
3579  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3580  if (ret < 0)
3581  return ret;
3582  }
3583 
3584  /* initialize bitstream filters for the output stream
3585  * needs to be done here, because the codec id for streamcopy is not
3586  * known until now */
3587  ret = init_output_bsfs(ost);
3588  if (ret < 0)
3589  return ret;
3590 
3591  ost->initialized = 1;
3592 
3593  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3594  if (ret < 0)
3595  return ret;
3596 
3597  return ret;
3598 }
3599 
3600 static void report_new_stream(int input_index, AVPacket *pkt)
3601 {
3602  InputFile *file = input_files[input_index];
3603  AVStream *st = file->ctx->streams[pkt->stream_index];
3604 
3605  if (pkt->stream_index < file->nb_streams_warn)
3606  return;
3607  av_log(file->ctx, AV_LOG_WARNING,
3608  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3610  input_index, pkt->stream_index,
3611  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3612  file->nb_streams_warn = pkt->stream_index + 1;
3613 }
3614 
/* transcode_init: one-time setup before the main transcode loop — resolve
 * filtergraph output sources, start framerate emulation clocks, open all
 * decoders and (non-filter-fed) encoders, discard unused programs, write
 * headers for stream-less outputs, then print the stream mapping.
 * NOTE(review): scraped doxygen listing; leading integers are the original
 * source line numbers and lines 3782 (likely the transcode_init_done store)
 * plus a few others are missing from this capture. Code left byte-identical;
 * only comments added. */
3615 static int transcode_init(void)
3616 {
3617  int ret = 0, i, j, k;
3618  AVFormatContext *oc;
3619  OutputStream *ost;
3620  InputStream *ist;
3621  char error[1024] = {0};
3622 
/* Attach a source input stream to filtergraph-fed outputs that have none,
 * when the graph has exactly one input. */
3623  for (i = 0; i < nb_filtergraphs; i++) {
3624  FilterGraph *fg = filtergraphs[i];
3625  for (j = 0; j < fg->nb_outputs; j++) {
3626  OutputFilter *ofilter = fg->outputs[j];
3627  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3628  continue;
3629  if (fg->nb_inputs != 1)
3630  continue;
3631  for (k = nb_input_streams-1; k >= 0 ; k--)
3632  if (fg->inputs[0]->ist == input_streams[k])
3633  break;
3634  ofilter->ost->source_index = k;
3635  }
3636  }
3637 
3638  /* init framerate emulation */
3639  for (i = 0; i < nb_input_files; i++) {
3640  InputFile *ifile = input_files[i];
3641  if (ifile->rate_emu)
3642  for (j = 0; j < ifile->nb_streams; j++)
3643  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3644  }
3645 
3646  /* init input streams */
3647  for (i = 0; i < nb_input_streams; i++)
3648  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any already-opened encoders before bailing out. */
3649  for (i = 0; i < nb_output_streams; i++) {
3650  ost = output_streams[i];
3651  avcodec_close(ost->enc_ctx);
3652  }
3653  goto dump_format;
3654  }
3655 
3656  /* open each encoder */
3657  for (i = 0; i < nb_output_streams; i++) {
3658  // skip streams fed from filtergraphs until we have a frame for them
3659  if (output_streams[i]->filter)
3660  continue;
3661 
3662  ret = init_output_stream(output_streams[i], error, sizeof(error));
3663  if (ret < 0)
3664  goto dump_format;
3665  }
3666 
3667  /* discard unused programs */
3668  for (i = 0; i < nb_input_files; i++) {
3669  InputFile *ifile = input_files[i];
3670  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3671  AVProgram *p = ifile->ctx->programs[j];
3672  int discard = AVDISCARD_ALL;
3673 
3674  for (k = 0; k < p->nb_stream_indexes; k++)
3675  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3676  discard = AVDISCARD_DEFAULT;
3677  break;
3678  }
3679  p->discard = discard;
3680  }
3681  }
3682 
3683  /* write headers for files with no streams */
3684  for (i = 0; i < nb_output_files; i++) {
3685  oc = output_files[i]->ctx;
3686  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3687  ret = check_init_output_file(output_files[i], i);
3688  if (ret < 0)
3689  goto dump_format;
3690  }
3691  }
3692 
3693  dump_format:
3694  /* dump the stream mapping */
3695  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3696  for (i = 0; i < nb_input_streams; i++) {
3697  ist = input_streams[i];
3698 
3699  for (j = 0; j < ist->nb_filters; j++) {
3700  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3701  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3702  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3703  ist->filters[j]->name);
3704  if (nb_filtergraphs > 1)
3705  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3706  av_log(NULL, AV_LOG_INFO, "\n");
3707  }
3708  }
3709  }
3710 
3711  for (i = 0; i < nb_output_streams; i++) {
3712  ost = output_streams[i];
3713 
3714  if (ost->attachment_filename) {
3715  /* an attached file */
3716  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3717  ost->attachment_filename, ost->file_index, ost->index);
3718  continue;
3719  }
3720 
3721  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3722  /* output from a complex graph */
3723  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3724  if (nb_filtergraphs > 1)
3725  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3726 
3727  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3728  ost->index, ost->enc ? ost->enc->name : "?");
3729  continue;
3730  }
3731 
3732  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3733  input_streams[ost->source_index]->file_index,
3734  input_streams[ost->source_index]->st->index,
3735  ost->file_index,
3736  ost->index);
3737  if (ost->sync_ist != input_streams[ost->source_index])
3738  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3739  ost->sync_ist->file_index,
3740  ost->sync_ist->st->index);
3741  if (ost->stream_copy)
3742  av_log(NULL, AV_LOG_INFO, " (copy)");
3743  else {
/* Show "native" when the codec implementation name equals the generic codec
 * descriptor name. */
3744  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3745  const AVCodec *out_codec = ost->enc;
3746  const char *decoder_name = "?";
3747  const char *in_codec_name = "?";
3748  const char *encoder_name = "?";
3749  const char *out_codec_name = "?";
3750  const AVCodecDescriptor *desc;
3751 
3752  if (in_codec) {
3753  decoder_name = in_codec->name;
3754  desc = avcodec_descriptor_get(in_codec->id);
3755  if (desc)
3756  in_codec_name = desc->name;
3757  if (!strcmp(decoder_name, in_codec_name))
3758  decoder_name = "native";
3759  }
3760 
3761  if (out_codec) {
3762  encoder_name = out_codec->name;
3763  desc = avcodec_descriptor_get(out_codec->id);
3764  if (desc)
3765  out_codec_name = desc->name;
3766  if (!strcmp(encoder_name, out_codec_name))
3767  encoder_name = "native";
3768  }
3769 
3770  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3771  in_codec_name, decoder_name,
3772  out_codec_name, encoder_name);
3773  }
3774  av_log(NULL, AV_LOG_INFO, "\n");
3775  }
3776 
3777  if (ret) {
3778  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3779  return ret;
3780  }
3781 
3783 
3784  return 0;
3785 }
3786 
3787 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3788 static int need_output(void)
3789 {
3790  int i;
3791 
3792  for (i = 0; i < nb_output_streams; i++) {
3793  OutputStream *ost = output_streams[i];
3794  OutputFile *of = output_files[ost->file_index];
3795  AVFormatContext *os = output_files[ost->file_index]->ctx;
3796 
3797  if (ost->finished ||
3798  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3799  continue;
3800  if (ost->frame_number >= ost->max_frames) {
3801  int j;
3802  for (j = 0; j < of->ctx->nb_streams; j++)
3803  close_output_stream(output_streams[of->ost_index + j]);
3804  continue;
3805  }
3806 
3807  return 1;
3808  }
3809 
3810  return 0;
3811 }
3812 
3813 /**
3814  * Select the output stream to process.
3815  *
3816  * @return selected output stream, or NULL if none available
3817  */
3819 {
3820  int i;
3821  int64_t opts_min = INT64_MAX;
3822  OutputStream *ost_min = NULL;
3823 
3824  for (i = 0; i < nb_output_streams; i++) {
3825  OutputStream *ost = output_streams[i];
3826  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3827  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3828  AV_TIME_BASE_Q);
3829  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3830  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3831 
3832  if (!ost->initialized && !ost->inputs_done)
3833  return ost;
3834 
3835  if (!ost->finished && opts < opts_min) {
3836  opts_min = opts;
3837  ost_min = ost->unavailable ? NULL : ost;
3838  }
3839  }
3840  return ost_min;
3841 }
3842 
/* Enable (on != 0) or disable terminal echo on stdin; no-op where termios
 * is unavailable or stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;
    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3854 
/* check_keyboard_interaction: poll (at most every 100ms) for interactive
 * single-key commands: q(uit), +/- verbosity, s QP histogram, h hex/packet
 * dump cycling, c/C filter commands, d/D debug flags, ? help. Returns
 * AVERROR_EXIT when the user asked to quit or a signal was received.
 * NOTE(review): scraped doxygen listing; leading integers are the original
 * source line numbers; lines 3879 and 3916 are missing from this capture
 * (the latter presumably the av_log call heading the parse-error message).
 * Code left byte-identical; only comments added. */
3855 static int check_keyboard_interaction(int64_t cur_time)
3856 {
3857  int i, ret, key;
3858  static int64_t last_time;
3859  if (received_nb_signals)
3860  return AVERROR_EXIT;
3861  /* read_key() returns 0 on EOF */
3862  if(cur_time - last_time >= 100000 && !run_as_daemon){
3863  key = read_key();
3864  last_time = cur_time;
3865  }else
3866  key = -1;
3867  if (key == 'q')
3868  return AVERROR_EXIT;
3869  if (key == '+') av_log_set_level(av_log_get_level()+10);
3870  if (key == '-') av_log_set_level(av_log_get_level()-10);
3871  if (key == 's') qp_hist ^= 1;
/* 'h' cycles: off -> packet dump -> packet+hex dump -> off */
3872  if (key == 'h'){
3873  if (do_hex_dump){
3874  do_hex_dump = do_pkt_dump = 0;
3875  } else if(do_pkt_dump){
3876  do_hex_dump = 1;
3877  } else
3878  do_pkt_dump = 1;
3880  }
/* 'c'/'C': read a "<target>|all <time>|-1 <command>[ <argument>]" line and
 * send (c) or queue (C) it to matching filtergraphs. */
3881  if (key == 'c' || key == 'C'){
3882  char buf[4096], target[64], command[256], arg[256] = {0};
3883  double time;
3884  int k, n = 0;
3885  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3886  i = 0;
3887  set_tty_echo(1);
3888  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3889  if (k > 0)
3890  buf[i++] = k;
3891  buf[i] = 0;
3892  set_tty_echo(0);
3893  fprintf(stderr, "\n");
3894  if (k > 0 &&
3895  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3896  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3897  target, time, command, arg);
3898  for (i = 0; i < nb_filtergraphs; i++) {
3899  FilterGraph *fg = filtergraphs[i];
3900  if (fg->graph) {
3901  if (time < 0) {
3902  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3903  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3904  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3905  } else if (key == 'c') {
3906  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3907  ret = AVERROR_PATCHWELCOME;
3908  } else {
3909  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3910  if (ret < 0)
3911  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3912  }
3913  }
3914  }
3915  } else {
3917  "Parse error, at least 3 arguments were expected, "
3918  "only %d given in string '%s'\n", n, buf);
3919  }
3920  }
/* 'd': prompt for a numeric debug value; 'D': cycle to the next supported
 * debug mode. Applied to all decoder and encoder contexts. */
3921  if (key == 'd' || key == 'D'){
3922  int debug=0;
3923  if(key == 'D') {
3924  debug = input_streams[0]->st->codec->debug<<1;
3925  if(!debug) debug = 1;
3926  while(debug & (FF_DEBUG_DCT_COEFF
3927 #if FF_API_DEBUG_MV
3928  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3929 #endif
3930  )) //unsupported, would just crash
3931  debug += debug;
3932  }else{
3933  char buf[32];
3934  int k = 0;
3935  i = 0;
3936  set_tty_echo(1);
3937  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3938  if (k > 0)
3939  buf[i++] = k;
3940  buf[i] = 0;
3941  set_tty_echo(0);
3942  fprintf(stderr, "\n");
3943  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3944  fprintf(stderr,"error parsing debug value\n");
3945  }
3946  for(i=0;i<nb_input_streams;i++) {
3947  input_streams[i]->st->codec->debug = debug;
3948  }
3949  for(i=0;i<nb_output_streams;i++) {
3950  OutputStream *ost = output_streams[i];
3951  ost->enc_ctx->debug = debug;
3952  }
3953  if(debug) av_log_set_level(AV_LOG_DEBUG);
3954  fprintf(stderr,"debug=%d\n", debug);
3955  }
3956  if (key == '?'){
3957  fprintf(stderr, "key function\n"
3958  "? show this help\n"
3959  "+ increase verbosity\n"
3960  "- decrease verbosity\n"
3961  "c Send command to first matching filter supporting it\n"
3962  "C Send/Queue command to all matching filters\n"
3963  "D cycle through available debug modes\n"
3964  "h dump packets/hex press to cycle through the 3 states\n"
3965  "q quit\n"
3966  "s Show QP histogram\n"
3967  );
3968  }
3969  return 0;
3970 }
3971 
3972 #if HAVE_THREADS
3973 static void *input_thread(void *arg)
3974 {
3975  InputFile *f = arg;
3976  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3977  int ret = 0;
3978 
3979  while (1) {
3980  AVPacket pkt;
3981  ret = av_read_frame(f->ctx, &pkt);
3982 
3983  if (ret == AVERROR(EAGAIN)) {
3984  av_usleep(10000);
3985  continue;
3986  }
3987  if (ret < 0) {
3988  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3989  break;
3990  }
3991  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3992  if (flags && ret == AVERROR(EAGAIN)) {
3993  flags = 0;
3994  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3996  "Thread message queue blocking; consider raising the "
3997  "thread_queue_size option (current value: %d)\n",
3998  f->thread_queue_size);
3999  }
4000  if (ret < 0) {
4001  if (ret != AVERROR_EOF)
4002  av_log(f->ctx, AV_LOG_ERROR,
4003  "Unable to send packet to main thread: %s\n",
4004  av_err2str(ret));
4005  av_packet_unref(&pkt);
4006  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4007  break;
4008  }
4009  }
4010 
4011  return NULL;
4012 }
4013 
4014 static void free_input_threads(void)
4015 {
4016  int i;
4017 
4018  for (i = 0; i < nb_input_files; i++) {
4019  InputFile *f = input_files[i];
4020  AVPacket pkt;
4021 
4022  if (!f || !f->in_thread_queue)
4023  continue;
4025  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4026  av_packet_unref(&pkt);
4027 
4028  pthread_join(f->thread, NULL);
4029  f->joined = 1;
4030  av_thread_message_queue_free(&f->in_thread_queue);
4031  }
4032 }
4033 
4034 static int init_input_threads(void)
4035 {
4036  int i, ret;
4037 
4038  if (nb_input_files == 1)
4039  return 0;
4040 
4041  for (i = 0; i < nb_input_files; i++) {
4042  InputFile *f = input_files[i];
4043 
4044  if (f->ctx->pb ? !f->ctx->pb->seekable :
4045  strcmp(f->ctx->iformat->name, "lavfi"))
4046  f->non_blocking = 1;
4047  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4048  f->thread_queue_size, sizeof(AVPacket));
4049  if (ret < 0)
4050  return ret;
4051 
4052  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4053  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4054  av_thread_message_queue_free(&f->in_thread_queue);
4055  return AVERROR(ret);
4056  }
4057  }
4058  return 0;
4059 }
4060 
4061 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4062 {
4063  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4064  f->non_blocking ?
4066 }
4067 #endif
4068 
4070 {
4071  if (f->rate_emu) {
4072  int i;
4073  for (i = 0; i < f->nb_streams; i++) {
4074  InputStream *ist = input_streams[f->ist_index + i];
4075  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4076  int64_t now = av_gettime_relative() - ist->start;
4077  if (pts > now)
4078  return AVERROR(EAGAIN);
4079  }
4080  }
4081 
4082 #if HAVE_THREADS
4083  if (nb_input_files > 1)
4084  return get_input_packet_mt(f, pkt);
4085 #endif
4086  return av_read_frame(f->ctx, pkt);
4087 }
4088 
4089 static int got_eagain(void)
4090 {
4091  int i;
4092  for (i = 0; i < nb_output_streams; i++)
4093  if (output_streams[i]->unavailable)
4094  return 1;
4095  return 0;
4096 }
4097 
4098 static void reset_eagain(void)
4099 {
4100  int i;
4101  for (i = 0; i < nb_input_files; i++)
4102  input_files[i]->eagain = 0;
4103  for (i = 0; i < nb_output_streams; i++)
4104  output_streams[i]->unavailable = 0;
4105 }
4106 
4107 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4108 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4109  AVRational time_base)
4110 {
4111  int ret;
4112 
4113  if (!*duration) {
4114  *duration = tmp;
4115  return tmp_time_base;
4116  }
4117 
4118  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4119  if (ret < 0) {
4120  *duration = tmp;
4121  return tmp_time_base;
4122  }
4123 
4124  return time_base;
4125 }
4126 
4128 {
4129  InputStream *ist;
4130  AVCodecContext *avctx;
4131  int i, ret, has_audio = 0;
4132  int64_t duration = 0;
4133 
4134  ret = av_seek_frame(is, -1, is->start_time, 0);
4135  if (ret < 0)
4136  return ret;
4137 
4138  for (i = 0; i < ifile->nb_streams; i++) {
4139  ist = input_streams[ifile->ist_index + i];
4140  avctx = ist->dec_ctx;
4141 
4142  /* duration is the length of the last frame in a stream
4143  * when audio stream is present we don't care about
4144  * last video frame length because it's not defined exactly */
4145  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4146  has_audio = 1;
4147  }
4148 
4149  for (i = 0; i < ifile->nb_streams; i++) {
4150  ist = input_streams[ifile->ist_index + i];
4151  avctx = ist->dec_ctx;
4152 
4153  if (has_audio) {
4154  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4155  AVRational sample_rate = {1, avctx->sample_rate};
4156 
4157  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4158  } else {
4159  continue;
4160  }
4161  } else {
4162  if (ist->framerate.num) {
4163  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4164  } else if (ist->st->avg_frame_rate.num) {
4165  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4166  } else {
4167  duration = 1;
4168  }
4169  }
4170  if (!ifile->duration)
4171  ifile->time_base = ist->st->time_base;
4172  /* the total duration of the stream, max_pts - min_pts is
4173  * the duration of the stream without the last frame */
4174  duration += ist->max_pts - ist->min_pts;
4175  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4176  ifile->time_base);
4177  }
4178 
4179  if (ifile->loop > 0)
4180  ifile->loop--;
4181 
4182  return ret;
4183 }
4184 
4185 /*
4186  * Return
4187  * - 0 -- one packet was read and processed
4188  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4189  * this function should be called again
4190  * - AVERROR_EOF -- this function should not be called again
4191  */
4192 static int process_input(int file_index)
4193 {
4194  InputFile *ifile = input_files[file_index];
4195  AVFormatContext *is;
4196  InputStream *ist;
4197  AVPacket pkt;
4198  int ret, i, j;
4199  int64_t duration;
4200  int64_t pkt_dts;
4201 
4202  is = ifile->ctx;
4203  ret = get_input_packet(ifile, &pkt);
4204 
4205  if (ret == AVERROR(EAGAIN)) {
4206  ifile->eagain = 1;
4207  return ret;
4208  }
4209  if (ret < 0 && ifile->loop) {
4210  AVCodecContext *avctx;
4211  for (i = 0; i < ifile->nb_streams; i++) {
4212  ist = input_streams[ifile->ist_index + i];
4213  avctx = ist->dec_ctx;
4214  if (ist->decoding_needed) {
4215  ret = process_input_packet(ist, NULL, 1);
4216  if (ret>0)
4217  return 0;
4218  avcodec_flush_buffers(avctx);
4219  }
4220  }
4221  ret = seek_to_start(ifile, is);
4222  if (ret < 0)
4223  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4224  else
4225  ret = get_input_packet(ifile, &pkt);
4226  if (ret == AVERROR(EAGAIN)) {
4227  ifile->eagain = 1;
4228  return ret;
4229  }
4230  }
4231  if (ret < 0) {
4232  if (ret != AVERROR_EOF) {
4233  print_error(is->url, ret);
4234  if (exit_on_error)
4235  exit_program(1);
4236  }
4237 
4238  for (i = 0; i < ifile->nb_streams; i++) {
4239  ist = input_streams[ifile->ist_index + i];
4240  if (ist->decoding_needed) {
4241  ret = process_input_packet(ist, NULL, 0);
4242  if (ret>0)
4243  return 0;
4244  }
4245 
4246  /* mark all outputs that don't go through lavfi as finished */
4247  for (j = 0; j < nb_output_streams; j++) {
4248  OutputStream *ost = output_streams[j];
4249 
4250  if (ost->source_index == ifile->ist_index + i &&
4251  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4252  finish_output_stream(ost);
4253  }
4254  }
4255 
4256  ifile->eof_reached = 1;
4257  return AVERROR(EAGAIN);
4258  }
4259 
4260  reset_eagain();
4261 
4262  if (do_pkt_dump) {
4264  is->streams[pkt.stream_index]);
4265  }
4266  /* the following test is needed in case new streams appear
4267  dynamically in stream : we ignore them */
4268  if (pkt.stream_index >= ifile->nb_streams) {
4269  report_new_stream(file_index, &pkt);
4270  goto discard_packet;
4271  }
4272 
4273  ist = input_streams[ifile->ist_index + pkt.stream_index];
4274 
4275  ist->data_size += pkt.size;
4276  ist->nb_packets++;
4277 
4278  if (ist->discard)
4279  goto discard_packet;
4280 
4281  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4282  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4283  exit_program(1);
4284  }
4285 
4286  if (debug_ts) {
4287  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4288  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4292  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4293  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4294  av_ts2str(input_files[ist->file_index]->ts_offset),
4295  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4296  }
4297 
4298  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4299  int64_t stime, stime2;
4300  // Correcting starttime based on the enabled streams
4301  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4302  // so we instead do it here as part of discontinuity handling
4303  if ( ist->next_dts == AV_NOPTS_VALUE
4304  && ifile->ts_offset == -is->start_time
4305  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4306  int64_t new_start_time = INT64_MAX;
4307  for (i=0; i<is->nb_streams; i++) {
4308  AVStream *st = is->streams[i];
4309  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4310  continue;
4311  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4312  }
4313  if (new_start_time > is->start_time) {
4314  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4315  ifile->ts_offset = -new_start_time;
4316  }
4317  }
4318 
4319  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4320  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4321  ist->wrap_correction_done = 1;
4322 
4323  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4324  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4325  ist->wrap_correction_done = 0;
4326  }
4327  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4328  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4329  ist->wrap_correction_done = 0;
4330  }
4331  }
4332 
4333  /* add the stream-global side data to the first packet */
4334  if (ist->nb_packets == 1) {
4335  for (i = 0; i < ist->st->nb_side_data; i++) {
4336  AVPacketSideData *src_sd = &ist->st->side_data[i];
4337  uint8_t *dst_data;
4338 
4339  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4340  continue;
4341 
4342  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4343  continue;
4344 
4345  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4346  if (!dst_data)
4347  exit_program(1);
4348 
4349  memcpy(dst_data, src_sd->data, src_sd->size);
4350  }
4351  }
4352 
4353  if (pkt.dts != AV_NOPTS_VALUE)
4354  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4355  if (pkt.pts != AV_NOPTS_VALUE)
4356  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4357 
4358  if (pkt.pts != AV_NOPTS_VALUE)
4359  pkt.pts *= ist->ts_scale;
4360  if (pkt.dts != AV_NOPTS_VALUE)
4361  pkt.dts *= ist->ts_scale;
4362 
4364  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4366  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4367  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4368  int64_t delta = pkt_dts - ifile->last_ts;
4369  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4370  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4371  ifile->ts_offset -= delta;
4373  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4374  delta, ifile->ts_offset);
4375  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4376  if (pkt.pts != AV_NOPTS_VALUE)
4377  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4378  }
4379  }
4380 
4381  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4382  if (pkt.pts != AV_NOPTS_VALUE) {
4383  pkt.pts += duration;
4384  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4385  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4386  }
4387 
4388  if (pkt.dts != AV_NOPTS_VALUE)
4389  pkt.dts += duration;
4390 
4392  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4394  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4395  !copy_ts) {
4396  int64_t delta = pkt_dts - ist->next_dts;
4397  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4398  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4399  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4400  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4401  ifile->ts_offset -= delta;
4403  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4404  delta, ifile->ts_offset);
4405  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4406  if (pkt.pts != AV_NOPTS_VALUE)
4407  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4408  }
4409  } else {
4410  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4411  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4412  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4413  pkt.dts = AV_NOPTS_VALUE;
4414  }
4415  if (pkt.pts != AV_NOPTS_VALUE){
4416  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4417  delta = pkt_pts - ist->next_dts;
4418  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4419  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4420  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4421  pkt.pts = AV_NOPTS_VALUE;
4422  }
4423  }
4424  }
4425  }
4426 
4427  if (pkt.dts != AV_NOPTS_VALUE)
4428  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4429 
4430  if (debug_ts) {
4431  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4433  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4434  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4435  av_ts2str(input_files[ist->file_index]->ts_offset),
4436  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4437  }
4438 
4439  sub2video_heartbeat(ist, pkt.pts);
4440 
4441  process_input_packet(ist, &pkt, 0);
4442 
4443 discard_packet:
4444  av_packet_unref(&pkt);
4445 
4446  return 0;
4447 }
4448 
4449 /**
4450  * Perform a step of transcoding for the specified filter graph.
4451  *
4452  * @param[in] graph filter graph to consider
4453  * @param[out] best_ist input stream where a frame would allow to continue
4454  * @return 0 for success, <0 for error
4455  */
4456 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4457 {
4458  int i, ret;
4459  int nb_requests, nb_requests_max = 0;
4460  InputFilter *ifilter;
4461  InputStream *ist;
4462 
4463  *best_ist = NULL;
4464  ret = avfilter_graph_request_oldest(graph->graph);
4465  if (ret >= 0)
4466  return reap_filters(0);
4467 
4468  if (ret == AVERROR_EOF) {
4469  ret = reap_filters(1);
4470  for (i = 0; i < graph->nb_outputs; i++)
4471  close_output_stream(graph->outputs[i]->ost);
4472  return ret;
4473  }
4474  if (ret != AVERROR(EAGAIN))
4475  return ret;
4476 
4477  for (i = 0; i < graph->nb_inputs; i++) {
4478  ifilter = graph->inputs[i];
4479  ist = ifilter->ist;
4480  if (input_files[ist->file_index]->eagain ||
4481  input_files[ist->file_index]->eof_reached)
4482  continue;
4483  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4484  if (nb_requests > nb_requests_max) {
4485  nb_requests_max = nb_requests;
4486  *best_ist = ist;
4487  }
4488  }
4489 
4490  if (!*best_ist)
4491  for (i = 0; i < graph->nb_outputs; i++)
4492  graph->outputs[i]->ost->unavailable = 1;
4493 
4494  return 0;
4495 }
4496 
4497 /**
4498  * Run a single step of transcoding.
4499  *
4500  * @return 0 for success, <0 for error
4501  */
4502 static int transcode_step(void)
4503 {
4504  OutputStream *ost;
4505  InputStream *ist = NULL;
4506  int ret;
4507 
4508  ost = choose_output();
4509  if (!ost) {
4510  if (got_eagain()) {
4511  reset_eagain();
4512  av_usleep(10000);
4513  return 0;
4514  }
4515  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4516  return AVERROR_EOF;
4517  }
4518 
4519  if (ost->filter && !ost->filter->graph->graph) {
4521  ret = configure_filtergraph(ost->filter->graph);
4522  if (ret < 0) {
4523  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4524  return ret;
4525  }
4526  }
4527  }
4528 
4529  if (ost->filter && ost->filter->graph->graph) {
4530  if (!ost->initialized) {
4531  char error[1024] = {0};
4532  ret = init_output_stream(ost, error, sizeof(error));
4533  if (ret < 0) {
4534  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4535  ost->file_index, ost->index, error);
4536  exit_program(1);
4537  }
4538  }
4539  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4540  return ret;
4541  if (!ist)
4542  return 0;
4543  } else if (ost->filter) {
4544  int i;
4545  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4546  InputFilter *ifilter = ost->filter->graph->inputs[i];
4547  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4548  ist = ifilter->ist;
4549  break;
4550  }
4551  }
4552  if (!ist) {
4553  ost->inputs_done = 1;
4554  return 0;
4555  }
4556  } else {
4557  av_assert0(ost->source_index >= 0);
4558  ist = input_streams[ost->source_index];
4559  }
4560 
4561  ret = process_input(ist->file_index);
4562  if (ret == AVERROR(EAGAIN)) {
4563  if (input_files[ist->file_index]->eagain)
4564  ost->unavailable = 1;
4565  return 0;
4566  }
4567 
4568  if (ret < 0)
4569  return ret == AVERROR_EOF ? 0 : ret;
4570 
4571  return reap_filters(0);
4572 }
4573 
4574 /*
4575  * The following code is the main loop of the file converter
4576  */
4577 static int transcode(void)
4578 {
4579  int ret, i;
4580  AVFormatContext *os;
4581  OutputStream *ost;
4582  InputStream *ist;
4583  int64_t timer_start;
4584  int64_t total_packets_written = 0;
4585 
4586  ret = transcode_init();
4587  if (ret < 0)
4588  goto fail;
4589 
4590  if (stdin_interaction) {
4591  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4592  }
4593 
4594  timer_start = av_gettime_relative();
4595 
4596 #if HAVE_THREADS
4597  if ((ret = init_input_threads()) < 0)
4598  goto fail;
4599 #endif
4600 
4601  while (!received_sigterm) {
4602  int64_t cur_time= av_gettime_relative();
4603 
4604  /* if 'q' pressed, exits */
4605  if (stdin_interaction)
4606  if (check_keyboard_interaction(cur_time) < 0)
4607  break;
4608 
4609  /* check if there's any stream where output is still needed */
4610  if (!need_output()) {
4611  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4612  break;
4613  }
4614 
4615  ret = transcode_step();
4616  if (ret < 0 && ret != AVERROR_EOF) {
4617  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4618  break;
4619  }
4620 
4621  /* dump report by using the output first video and audio streams */
4622  print_report(0, timer_start, cur_time);
4623  }
4624 #if HAVE_THREADS
4625  free_input_threads();
4626 #endif
4627 
4628  /* at the end of stream, we must flush the decoder buffers */
4629  for (i = 0; i < nb_input_streams; i++) {
4630  ist = input_streams[i];
4631  if (!input_files[ist->file_index]->eof_reached) {
4632  process_input_packet(ist, NULL, 0);
4633  }
4634  }
4635  flush_encoders();
4636 
4637  term_exit();
4638 
4639  /* write the trailer if needed and close file */
4640  for (i = 0; i < nb_output_files; i++) {
4641  os = output_files[i]->ctx;
4642  if (!output_files[i]->header_written) {
4644  "Nothing was written into output file %d (%s), because "
4645  "at least one of its streams received no packets.\n",
4646  i, os->url);
4647  continue;
4648  }