FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity used by the cmdutils banner/version machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Output file for -vstats statistics; closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* NULL-terminated names of the variables accepted by the
 * -force_key_frames expression; presumably paired with a matching
 * value array at the evaluation site -- verify against the caller. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 
/* Process-wide transcoding state. */
128 static int run_as_daemon = 0;
/* Frame duplicate/drop counters maintained by the video sync code;
 * dup_warning is the next nb_frames_dup threshold that triggers a
 * warning (multiplied by 10 each time, see do_video_out()). */
129 static int nb_frames_dup = 0;
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
133 
134 static int want_sdp = 1;
135 
/* Last timestamp taken by update_benchmark().
 * NOTE(review): getutime() returns int64_t but this is a plain int --
 * likely a transcription loss (upstream uses int64_t); verify, since
 * storing the value here would otherwise truncate. */
136 static int current_time;
138 
/* NOTE(review): the bare numbers below are original line numbers whose
 * content was lost in extraction (additional global declarations such
 * as the input/output stream and file arrays); recover them from the
 * original file before relying on this listing. */
140 
145 
150 
153 
154 #if HAVE_TERMIOS_H
155 
156 /* init terminal so that we can grab keys */
157 static struct termios oldtty;
158 static int restore_tty;
159 #endif
160 
161 #if HAVE_PTHREADS
162 static void free_input_threads(void);
163 #endif
164 
165 /* sub2video hack:
166  Convert subtitles to video with alpha to insert them in filter graphs.
167  This is a temporary solution until libavfilter gets real subtitles support.
168  */
169 
/* Prepare ist->sub2video.frame as a blank canvas: size it from the
 * decoder dimensions (falling back to the configured sub2video size),
 * allocate the buffer and zero the first plane.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the signature line (orig. 170) was lost in extraction;
 * presumably "static int sub2video_get_blank_frame(InputStream *ist)". */
171 {
172  int ret;
173  AVFrame *frame = ist->sub2video.frame;
174 
175  av_frame_unref(frame);
176  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
/* NOTE(review): orig. line 178 was lost in extraction; upstream sets
 * frame->format here -- confirm before treating this listing as complete. */
179  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180  return ret;
/* Only plane 0 is cleared, consistent with the packed 4-bytes-per-pixel
 * writes performed by sub2video_copy_rect(). */
181  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
182  return 0;
183 }
184 
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186  AVSubtitleRect *r)
187 {
188  uint32_t *pal, *dst2;
189  uint8_t *src, *src2;
190  int x, y;
191 
192  if (r->type != SUBTITLE_BITMAP) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194  return;
195  }
196  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198  r->x, r->y, r->w, r->h, w, h
199  );
200  return;
201  }
202 
203  dst += r->y * dst_linesize + r->x * 4;
204  src = r->data[0];
205  pal = (uint32_t *)r->data[1];
206  for (y = 0; y < r->h; y++) {
207  dst2 = (uint32_t *)dst;
208  src2 = src;
209  for (x = 0; x < r->w; x++)
210  *(dst2++) = pal[*(src2++)];
211  dst += dst_linesize;
212  src += r->linesize[0];
213  }
214 }
215 
/* Stamp the cached sub2video canvas with the given pts, remember it as
 * the last pushed pts, and iterate over this input's filters. */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 {
218  AVFrame *frame = ist->sub2video.frame;
219  int i;
220 
/* A canvas must already exist (sub2video_get_blank_frame succeeded). */
221  av_assert1(frame->data[0]);
222  ist->sub2video.last_pts = frame->pts = pts;
223  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (orig. 224-226) was lost in extraction;
 * presumably an av_buffersrc_add_frame*() call feeding the frame to
 * each filter input -- recover from the original file. */
227 }
228 
/* Render the given subtitle (or a blank frame when sub is NULL) onto the
 * sub2video canvas and push it into the filtergraph; records the display
 * end time in ist->sub2video.end_pts.
 * NOTE(review): the signature line (orig. 229) was lost in extraction;
 * presumably "static void sub2video_update(InputStream *ist,
 * AVSubtitle *sub)". */
230 {
231  AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): dst is int8_t* while sub2video_copy_rect() takes
 * uint8_t* -- likely a transcription artifact; verify against upstream. */
232  int8_t *dst;
233  int dst_linesize;
234  int num_rects, i;
235  int64_t pts, end_pts;
236 
237  if (!frame)
238  return;
239  if (sub) {
/* start/end_display_time are in ms relative to sub->pts (in
 * AV_TIME_BASE units); convert both to the stream time base. */
240  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
241  AV_TIME_BASE_Q, ist->st->time_base);
242  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
243  AV_TIME_BASE_Q, ist->st->time_base);
244  num_rects = sub->num_rects;
245  } else {
/* Heartbeat/flush path: re-emit a blank frame at the previous end pts. */
246  pts = ist->sub2video.end_pts;
247  end_pts = INT64_MAX;
248  num_rects = 0;
249  }
250  if (sub2video_get_blank_frame(ist) < 0) {
/* NOTE(review): the av_log() head (orig. 251) was lost in extraction. */
252  "Impossible to get a blank canvas.\n");
253  return;
254  }
255  dst = frame->data [0];
256  dst_linesize = frame->linesize[0];
257  for (i = 0; i < num_rects; i++)
258  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
259  sub2video_push_ref(ist, pts);
260  ist->sub2video.end_pts = end_pts;
261 }
262 
263 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 {
265  InputFile *infile = input_files[ist->file_index];
266  int i, j, nb_reqs;
267  int64_t pts2;
268 
269  /* When a frame is read from a file, examine all sub2video streams in
270  the same file and send the sub2video frame again. Otherwise, decoded
271  video frames could be accumulating in the filter graph while a filter
272  (possibly overlay) is desperately waiting for a subtitle frame. */
273  for (i = 0; i < infile->nb_streams; i++) {
274  InputStream *ist2 = input_streams[infile->ist_index + i];
275  if (!ist2->sub2video.frame)
276  continue;
277  /* subtitles seem to be usually muxed ahead of other streams;
278  if not, subtracting a larger time here is necessary */
279  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
280  /* do not send the heartbeat frame if the subtitle is already ahead */
281  if (pts2 <= ist2->sub2video.last_pts)
282  continue;
283  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
284  sub2video_update(ist2, NULL);
285  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287  if (nb_reqs)
288  sub2video_push_ref(ist2, pts2);
289  }
290 }
291 
/* End-of-stream handling for a sub2video input: emit one final blank
 * update if the last subtitle had a bounded end time, then iterate over
 * the input's filters. */
292 static void sub2video_flush(InputStream *ist)
293 {
294  int i;
295 
296  if (ist->sub2video.end_pts < INT64_MAX)
297  sub2video_update(ist, NULL);
298  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (orig. 299) was lost in extraction;
 * presumably pushes a NULL (EOF) frame into each filter via
 * av_buffersrc -- recover from the original file. */
300 }
301 
302 /* end of sub2video hack */
303 
/*
 * Async-signal-safe terminal restore: undo the raw-mode tty settings
 * saved into oldtty by term_init(). Safe to call from a signal handler
 * because tcsetattr() is on the POSIX async-signal-safe list.
 */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
311 
/* Public terminal teardown: emits an empty message at AV_LOG_QUIET
 * (apparently a log-flush idiom) before restoring the terminal.
 * NOTE(review): orig. line 315 was lost in extraction; upstream calls
 * term_exit_sigsafe() there -- confirm. */
312 void term_exit(void)
313 {
314  av_log(NULL, AV_LOG_QUIET, "%s", "");
316 }
317 
/* Shutdown bookkeeping shared between signal handlers and the main
 * loop; volatile because the handlers write them asynchronously. */
318 static volatile int received_sigterm = 0;
319 static volatile int received_nb_signals = 0;
320 static volatile int transcode_init_done = 0;
321 static volatile int ffmpeg_exited = 0;
/* Process exit status propagated from non-fatal muxing errors. */
322 static int main_return_code = 0;
323 
/* Signal handler for SIGINT/SIGTERM/SIGXCPU (and Windows console
 * events): records which signal arrived and hard-exits once more than
 * three signals have been received.
 * NOTE(review): orig. 325 (parameter list, presumably "(int sig)") and
 * orig. 328-329 (presumably incrementing received_nb_signals and
 * restoring the tty) were lost in extraction. */
324 static void
326 {
327  received_sigterm = sig;
330  if(received_nb_signals > 3) {
/* write(2) is async-signal-safe; its return value is ignored since we
 * hard-exit immediately afterwards anyway. */
331  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
332  strlen("Received > 3 system signals, hard exiting\n"));
333 
334  exit(123);
335  }
336 }
337 
#if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: translate console events into the
 * same sigterm_handler() path used on POSIX systems.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
369 
/* Install signal handlers and, on POSIX, switch the controlling tty
 * into a raw-ish mode so read_key() can grab single keystrokes.
 * NOTE(review): orig. line 373 was lost in extraction; the orphan "}"
 * at orig. 391 shows a guard block opened there (upstream uses
 * "if (!run_as_daemon) {") -- confirm against the original file. */
370 void term_init(void)
371 {
372 #if HAVE_TERMIOS_H
374  struct termios tty;
375  if (tcgetattr (0, &tty) == 0) {
/* Save current settings so term_exit_sigsafe() can restore them. */
376  oldtty = tty;
377  restore_tty = 1;
378 
/* Disable input translation/flow control, canonical mode and echo;
 * keep output post-processing (OPOST) so log output stays readable. */
379  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
380  |INLCR|IGNCR|ICRNL|IXON);
381  tty.c_oflag |= OPOST;
382  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
383  tty.c_cflag &= ~(CSIZE|PARENB);
384  tty.c_cflag |= CS8;
/* Blocking read of 1 byte with no inter-byte timeout. */
385  tty.c_cc[VMIN] = 1;
386  tty.c_cc[VTIME] = 0;
387 
388  tcsetattr (0, TCSANOW, &tty);
389  }
390  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
391  }
392 #endif
393 
394  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
395  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
396 #ifdef SIGXCPU
397  signal(SIGXCPU, sigterm_handler);
398 #endif
399 #if HAVE_SETCONSOLECTRLHANDLER
400  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
401 #endif
402 }
403 
404 /* read a key without blocking */
/* Returns the key byte (0-255), -1 when no input is available, or the
 * (<=0) result of read() on error/EOF. POSIX path polls stdin with a
 * zero-timeout select(); Windows path uses PeekNamedPipe()/kbhit(). */
405 static int read_key(void)
406 {
407  unsigned char ch;
408 #if HAVE_TERMIOS_H
409  int n = 1;
410  struct timeval tv;
411  fd_set rfds;
412 
413  FD_ZERO(&rfds);
414  FD_SET(0, &rfds);
/* Zero timeout: pure poll, never blocks. */
415  tv.tv_sec = 0;
416  tv.tv_usec = 0;
417  n = select(1, &rfds, NULL, NULL, &tv);
418  if (n > 0) {
419  n = read(0, &ch, 1);
420  if (n == 1)
421  return ch;
422 
423  return n;
424  }
425 #elif HAVE_KBHIT
426 # if HAVE_PEEKNAMEDPIPE
427  static int is_pipe;
428  static HANDLE input_handle;
429  DWORD dw, nchars;
/* Lazily classify stdin once: console vs. pipe (GUI/redirected). */
430  if(!input_handle){
431  input_handle = GetStdHandle(STD_INPUT_HANDLE);
432  is_pipe = !GetConsoleMode(input_handle, &dw);
433  }
434 
435  if (is_pipe) {
436  /* When running under a GUI, you will end here. */
437  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
438  // input pipe may have been closed by the program that ran ffmpeg
439  return -1;
440  }
441  //Read it
442  if(nchars != 0) {
/* NOTE(review): read() result is unchecked here; on a short/failed
 * read ch would be stale -- acceptable for interactive keys, but
 * worth verifying against upstream. */
443  read(0, &ch, 1);
444  return ch;
445  }else{
446  return -1;
447  }
448  }
449 # endif
450  if(kbhit())
451  return(getch());
452 #endif
453  return -1;
454 }
455 
/* AVIOInterruptCallback used by libavformat to abort blocking I/O.
 * NOTE(review): the return expression (orig. 458) was lost in
 * extraction; upstream returns nonzero once shutdown was requested
 * (received_nb_signals vs. transcode_init_done) -- confirm. */
456 static int decode_interrupt_cb(void *ctx)
457 {
459 }
460 
462 
/* Global teardown registered for program exit: drains queued frames,
 * subtitles and packets, frees filtergraphs, streams and files, and
 * reports the final status. `ret` is the pending exit code.
 * NOTE(review): several original lines were lost in extraction (the
 * bare numbers below); each gap is flagged inline -- recover the
 * missing statements from the original file before editing. */
463 static void ffmpeg_cleanup(int ret)
464 {
465  int i, j;
466 
467  if (do_benchmark) {
468  int maxrss = getmaxrss() / 1024;
469  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
470  }
471 
/* Free every filtergraph, draining queued frames/subtitles first. */
472  for (i = 0; i < nb_filtergraphs; i++) {
473  FilterGraph *fg = filtergraphs[i];
475  for (j = 0; j < fg->nb_inputs; j++) {
476  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
477  AVFrame *frame;
478  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
479  sizeof(frame), NULL);
480  av_frame_free(&frame);
481  }
483  if (fg->inputs[j]->ist->sub2video.sub_queue) {
484  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
485  AVSubtitle sub;
/* NOTE(review): the av_fifo_generic_read() head (orig. 486) was lost
 * in extraction. */
487  &sub, sizeof(sub), NULL);
488  avsubtitle_free(&sub);
489  }
491  }
493  av_freep(&fg->inputs[j]->name);
494  av_freep(&fg->inputs[j]);
495  }
496  av_freep(&fg->inputs);
497  for (j = 0; j < fg->nb_outputs; j++) {
498  av_freep(&fg->outputs[j]->name);
499  av_freep(&fg->outputs[j]->formats);
500  av_freep(&fg->outputs[j]->channel_layouts);
501  av_freep(&fg->outputs[j]->sample_rates);
502  av_freep(&fg->outputs[j]);
503  }
504  av_freep(&fg->outputs);
505  av_freep(&fg->graph_desc);
506 
507  av_freep(&filtergraphs[i]);
508  }
509  av_freep(&filtergraphs);
510 
512 
513  /* close files */
514  for (i = 0; i < nb_output_files; i++) {
515  OutputFile *of = output_files[i];
/* NOTE(review): orig. 516 (presumably the AVFormatContext *s
 * declaration) was lost in extraction. */
517  if (!of)
518  continue;
519  s = of->ctx;
520  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
521  avio_closep(&s->pb);
523  av_dict_free(&of->opts);
524 
525  av_freep(&output_files[i]);
526  }
527  for (i = 0; i < nb_output_streams; i++) {
528  OutputStream *ost = output_streams[i];
529 
530  if (!ost)
531  continue;
532 
533  for (j = 0; j < ost->nb_bitstream_filters; j++)
534  av_bsf_free(&ost->bsf_ctx[j]);
535  av_freep(&ost->bsf_ctx);
537 
539  av_frame_free(&ost->last_frame);
540  av_dict_free(&ost->encoder_opts);
541 
542  av_parser_close(ost->parser);
544 
545  av_freep(&ost->forced_keyframes);
547  av_freep(&ost->avfilter);
548  av_freep(&ost->logfile_prefix);
549 
551  ost->audio_channels_mapped = 0;
552 
553  av_dict_free(&ost->sws_dict);
554 
557 
/* Drop any packets still buffered because the muxer header was never
 * written (see write_packet()). */
558  if (ost->muxing_queue) {
559  while (av_fifo_size(ost->muxing_queue)) {
560  AVPacket pkt;
561  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
562  av_packet_unref(&pkt);
563  }
565  }
566 
567  av_freep(&output_streams[i]);
568  }
569 #if HAVE_PTHREADS
570  free_input_threads();
571 #endif
572  for (i = 0; i < nb_input_files; i++) {
573  avformat_close_input(&input_files[i]->ctx);
574  av_freep(&input_files[i]);
575  }
576  for (i = 0; i < nb_input_streams; i++) {
577  InputStream *ist = input_streams[i];
578 
581  av_dict_free(&ist->decoder_opts);
584  av_freep(&ist->filters);
585  av_freep(&ist->hwaccel_device);
586  av_freep(&ist->dts_buffer);
587 
589 
590  av_freep(&input_streams[i]);
591  }
592 
593  if (vstats_file) {
594  if (fclose(vstats_file))
/* NOTE(review): the av_log() head (orig. 595) was lost in extraction. */
596  "Error closing vstats file, loss of information possible: %s\n",
597  av_err2str(AVERROR(errno)));
598  }
600 
601  av_freep(&input_streams);
602  av_freep(&input_files);
603  av_freep(&output_streams);
604  av_freep(&output_files);
605 
606  uninit_opts();
607 
609 
610  if (received_sigterm) {
611  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
612  (int) received_sigterm);
613  } else if (ret && transcode_init_done) {
614  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
615  }
616  term_exit();
/* Releases the busy-wait in the Windows CtrlHandler(). */
617  ffmpeg_exited = 1;
618 }
619 
/* Iterate every entry of dictionary b.
 * NOTE(review): the signature (orig. 620, presumably "static void
 * remove_avoptions(AVDictionary **a, AVDictionary *b)") and the loop
 * body (orig. 625, presumably deleting each matching key from *a via
 * av_dict_set(a, t->key, NULL, ...)) were lost in extraction. */
621 {
622  AVDictionaryEntry *t = NULL;
623 
624  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
626  }
627 }
628 
/* Abort the program if any user-supplied option left in m was not
 * consumed by the library.
 * NOTE(review): the signature (orig. 629, presumably "static void
 * assert_avoptions(AVDictionary *m)") and the declaration of t
 * (orig. 631) were lost in extraction. */
630 {
632  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
633  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
634  exit_program(1);
635  }
636 }
637 
638 static void abort_codec_experimental(AVCodec *c, int encoder)
639 {
640  exit_program(1);
641 }
642 
643 static void update_benchmark(const char *fmt, ...)
644 {
645  if (do_benchmark_all) {
646  int64_t t = getutime();
647  va_list va;
648  char buf[1024];
649 
650  if (fmt) {
651  va_start(va, fmt);
652  vsnprintf(buf, sizeof(buf), fmt, va);
653  va_end(va);
654  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
655  }
656  current_time = t;
657  }
658 }
659 
660 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
661 {
662  int i;
663  for (i = 0; i < nb_output_streams; i++) {
664  OutputStream *ost2 = output_streams[i];
665  ost2->finished |= ost == ost2 ? this_stream : others;
666  }
667 }
668 
/* Hand one encoded packet to the muxer of `of`, fixing up timestamps
 * and enforcing DTS monotonicity; packets arriving before the muxer
 * header is written are buffered in ost->muxing_queue instead.
 * NOTE(review): the signature (orig. 669) was lost in extraction;
 * presumably "static void write_packet(OutputFile *of, AVPacket *pkt,
 * OutputStream *ost)". Further elided lines are flagged inline. */
670 {
671  AVFormatContext *s = of->ctx;
672  AVStream *st = ost->st;
673  int ret;
674 
675  if (!of->header_written) {
676  AVPacket tmp_pkt = {0};
677  /* the muxer is not initialized yet, buffer the packet */
678  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue up to max_muxing_queue_size; bail out once full. */
679  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
680  ost->max_muxing_queue_size);
681  if (new_size <= av_fifo_size(ost->muxing_queue)) {
/* NOTE(review): the av_log() head (orig. 682) was lost in extraction. */
683  "Too many packets buffered for output stream %d:%d.\n",
684  ost->file_index, ost->st->index);
685  exit_program(1);
686  }
687  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
688  if (ret < 0)
689  exit_program(1);
690  }
691  ret = av_packet_ref(&tmp_pkt, pkt);
692  if (ret < 0)
693  exit_program(1);
694  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
695  av_packet_unref(pkt);
696  return;
697  }
698 
/* NOTE(review): orig. 699-700 (the condition guarding this timestamp
 * reset; upstream checks for VSYNC_DROP video / negative audio sync)
 * were lost in extraction. */
701  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
702 
703  /*
704  * Audio encoders may split the packets -- #frames in != #packets out.
705  * But there is no reordering, so we can limit the number of output packets
706  * by simply dropping them here.
707  * Counting encoded video frames needs to be done separately because of
708  * reordering, see do_video_out()
709  */
710  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
711  if (ost->frame_number >= ost->max_frames) {
712  av_packet_unref(pkt);
713  return;
714  }
715  ost->frame_number++;
716  }
717  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
718  int i;
/* NOTE(review): orig. 719 was lost in extraction; presumably fetches
 * the AV_PKT_DATA_QUALITY_STATS side data into `sd`. */
720  NULL);
721  ost->quality = sd ? AV_RL32(sd) : -1;
722  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
723 
724  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
725  if (sd && i < sd[5])
726  ost->error[i] = AV_RL64(sd + 8 + 8*i);
727  else
728  ost->error[i] = -1;
729  }
730 
/* CFR output: derive the duration from the frame rate. */
731  if (ost->frame_rate.num && ost->is_cfr) {
732  if (pkt->duration > 0)
733  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
734  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
735  ost->mux_timebase);
736  }
737  }
738 
739  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
740 
741  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; replace both with a median-style guess built
 * from pts, dts and the last muxed dts. */
742  if (pkt->dts != AV_NOPTS_VALUE &&
743  pkt->pts != AV_NOPTS_VALUE &&
744  pkt->dts > pkt->pts) {
745  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
746  pkt->dts, pkt->pts,
747  ost->file_index, ost->st->index);
748  pkt->pts =
749  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
750  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
751  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
752  }
/* NOTE(review): orig. 753 (the "if (" head of this monotonicity
 * check, presumably also testing the codec type) was lost in
 * extraction. */
754  pkt->dts != AV_NOPTS_VALUE &&
755  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
756  ost->last_mux_dts != AV_NOPTS_VALUE) {
757  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
758  if (pkt->dts < max) {
759  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
760  av_log(s, loglevel, "Non-monotonous DTS in output stream "
761  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
762  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
763  if (exit_on_error) {
764  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
765  exit_program(1);
766  }
767  av_log(s, loglevel, "changing to %"PRId64". This may result "
768  "in incorrect timestamps in the output file.\n",
769  max);
770  if (pkt->pts >= pkt->dts)
771  pkt->pts = FFMAX(pkt->pts, max);
772  pkt->dts = max;
773  }
774  }
775  }
776  ost->last_mux_dts = pkt->dts;
777 
778  ost->data_size += pkt->size;
779  ost->packets_written++;
780 
781  pkt->stream_index = ost->index;
782 
783  if (debug_ts) {
784  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
785  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
/* NOTE(review): orig. 786 (presumably the media-type string argument)
 * was lost in extraction. */
787  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
788  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
789  pkt->size
790  );
791  }
792 
793  ret = av_interleaved_write_frame(s, pkt);
794  if (ret < 0) {
795  print_error("av_interleaved_write_frame()", ret);
796  main_return_code = 1;
/* NOTE(review): orig. 797 was lost in extraction; presumably marks
 * all output streams finished via close_all_output_streams(). */
798  }
799  av_packet_unref(pkt);
800 }
801 
/* Mark this encoder finished; with -shortest, clamp the whole file's
 * recording time to this stream's current end position.
 * NOTE(review): the signature (orig. 802) was lost in extraction;
 * presumably "static void close_output_stream(OutputStream *ost)". */
803 {
804  OutputFile *of = output_files[ost->file_index];
805 
806  ost->finished |= ENCODER_FINISHED;
807  if (of->shortest) {
808  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
809  of->recording_time = FFMIN(of->recording_time, end);
810  }
811 }
812 
/* Run the packet through the output bitstream-filter chain (if any),
 * then hand it to write_packet(); propagates extradata updates made by
 * filters such as aac_adtstoasc.
 * NOTE(review): the signature (orig. 813) was lost in extraction;
 * presumably "static void output_packet(OutputFile *of, AVPacket *pkt,
 * OutputStream *ost)". */
814 {
815  int ret = 0;
816 
817  /* apply the output bitstream filters, if any */
818  if (ost->nb_bitstream_filters) {
819  int idx;
820 
821  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
822  if (ret < 0)
823  goto finish;
824 
/* idx is the position in the chain we are currently draining; it
 * drops to 0 (loop exit) once filter 0 has no more output. */
825  idx = 1;
826  while (idx) {
827  /* get a packet from the previous filter up the chain */
828  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
829  if (ret == AVERROR(EAGAIN)) {
830  ret = 0;
831  idx--;
832  continue;
833  } else if (ret < 0)
834  goto finish;
835  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
836  * the api states this shouldn't happen after init(). Propagate it here to the
837  * muxer and to the next filters in the chain to workaround this.
838  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
839  * par_out->extradata and adapt muxers accordingly to get rid of this. */
840  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
841  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
842  if (ret < 0)
843  goto finish;
844  ost->bsf_extradata_updated[idx - 1] |= 1;
845  }
846 
847  /* send it to the next filter down the chain or to the muxer */
848  if (idx < ost->nb_bitstream_filters) {
849  /* HACK/FIXME! - See above */
850  if (!(ost->bsf_extradata_updated[idx] & 2)) {
851  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
852  if (ret < 0)
853  goto finish;
854  ost->bsf_extradata_updated[idx] |= 2;
855  }
856  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
857  if (ret < 0)
858  goto finish;
859  idx++;
860  } else
861  write_packet(of, pkt, ost);
862  }
863  } else
864  write_packet(of, pkt, ost);
865 
866 finish:
867  if (ret < 0 && ret != AVERROR_EOF) {
868  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
869  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
870  if(exit_on_error)
871  exit_program(1);
872  }
873 }
874 
/* Returns 1 while the stream is still within the file's -t recording
 * window; returns 0 (and closes the stream) once the limit is reached.
 * NOTE(review): the signature (orig. 875, presumably "static int
 * check_recording_time(OutputStream *ost)") and orig. 880 (presumably
 * the av_compare_ts() call head comparing the stream position against
 * of->recording_time) were lost in extraction. */
876 {
877  OutputFile *of = output_files[ost->file_index];
878 
879  if (of->recording_time != INT64_MAX &&
881  AV_TIME_BASE_Q) >= 0) {
882  close_output_stream(ost);
883  return 0;
884  }
885  return 1;
886 }
887 
/* Encode one audio frame with the stream's encoder and mux every packet
 * it produces; updates sync_opts and the encoded-sample counters.
 * Fatal on encoder errors (exits the program). */
888 static void do_audio_out(OutputFile *of, OutputStream *ost,
889  AVFrame *frame)
890 {
891  AVCodecContext *enc = ost->enc_ctx;
892  AVPacket pkt;
893  int ret;
894 
895  av_init_packet(&pkt);
896  pkt.data = NULL;
897  pkt.size = 0;
898 
899  if (!check_recording_time(ost))
900  return;
901 
/* Without a usable pts (or with negative audio sync), continue from
 * the running sample count in sync_opts. */
902  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
903  frame->pts = ost->sync_opts;
904  ost->sync_opts = frame->pts + frame->nb_samples;
905  ost->samples_encoded += frame->nb_samples;
906  ost->frames_encoded++;
907 
908  av_assert0(pkt.size || !pkt.data);
/* NOTE(review): orig. 909 was lost in extraction (possibly blank;
 * verify against the original file). */
910  if (debug_ts) {
911  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
912  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
913  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
914  enc->time_base.num, enc->time_base.den);
915  }
916 
917  ret = avcodec_send_frame(enc, frame);
918  if (ret < 0)
919  goto error;
920 
/* Drain all packets the encoder can produce for this frame; EAGAIN
 * means it needs more input and ends the loop. */
921  while (1) {
922  ret = avcodec_receive_packet(enc, &pkt);
923  if (ret == AVERROR(EAGAIN))
924  break;
925  if (ret < 0)
926  goto error;
927 
928  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
929 
930  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
931 
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
934  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
935  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
936  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
937  }
938 
939  output_packet(of, &pkt, ost);
940  }
941 
942  return;
943 error:
944  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
945  exit_program(1);
946 }
947 
/* Encode one AVSubtitle and mux the result; DVB subtitles are encoded
 * twice (draw + clear packet). Fatal on encoding failure. */
948 static void do_subtitle_out(OutputFile *of,
949  OutputStream *ost,
950  AVSubtitle *sub)
951 {
952  int subtitle_out_max_size = 1024 * 1024;
953  int subtitle_out_size, nb, i;
954  AVCodecContext *enc;
955  AVPacket pkt;
956  int64_t pts;
957 
958  if (sub->pts == AV_NOPTS_VALUE) {
959  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
960  if (exit_on_error)
961  exit_program(1);
962  return;
963  }
964 
965  enc = ost->enc_ctx;
966 
/* Lazily allocate the shared 1 MiB encode buffer (file-scope). */
967  if (!subtitle_out) {
968  subtitle_out = av_malloc(subtitle_out_max_size);
969  if (!subtitle_out) {
970  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
971  exit_program(1);
972  }
973  }
974 
975  /* Note: DVB subtitle need one packet to draw them and one other
976  packet to clear them */
977  /* XXX: signal it in the codec context ? */
/* NOTE(review): orig. 978 (the condition selecting nb = 2, presumably
 * a DVB-subtitle codec-id check) was lost in extraction. */
979  nb = 2;
980  else
981  nb = 1;
982 
983  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
984  pts = sub->pts;
985  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
986  pts -= output_files[ost->file_index]->start_time;
987  for (i = 0; i < nb; i++) {
988  unsigned save_num_rects = sub->num_rects;
989 
990  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
991  if (!check_recording_time(ost))
992  return;
993 
994  sub->pts = pts;
995  // start_display_time is required to be 0
996  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
/* NOTE(review): orig. 997 was lost in extraction; upstream adjusts
 * end_display_time by start_display_time here -- confirm. */
998  sub->start_display_time = 0;
/* Second (clear) packet carries no rectangles. */
999  if (i == 1)
1000  sub->num_rects = 0;
1001 
1002  ost->frames_encoded++;
1003 
1004  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1005  subtitle_out_max_size, sub);
1006  if (i == 1)
1007  sub->num_rects = save_num_rects;
1008  if (subtitle_out_size < 0) {
1009  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1010  exit_program(1);
1011  }
1012 
1013  av_init_packet(&pkt);
1014  pkt.data = subtitle_out;
1015  pkt.size = subtitle_out_size;
1016  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1017  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1018  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1019  /* XXX: the pts correction is handled here. Maybe handling
1020  it in the codec would be better */
1021  if (i == 0)
1022  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1023  else
1024  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1025  }
1026  pkt.dts = pkt.pts;
1027  output_packet(of, &pkt, ost);
1028  }
1029 }
1030 
1031 static void do_video_out(OutputFile *of,
1032  OutputStream *ost,
1033  AVFrame *next_picture,
1034  double sync_ipts)
1035 {
1036  int ret, format_video_sync;
1037  AVPacket pkt;
1038  AVCodecContext *enc = ost->enc_ctx;
1039  AVCodecParameters *mux_par = ost->st->codecpar;
1040  AVRational frame_rate;
1041  int nb_frames, nb0_frames, i;
1042  double delta, delta0;
1043  double duration = 0;
1044  int frame_size = 0;
1045  InputStream *ist = NULL;
1047 
1048  if (ost->source_index >= 0)
1049  ist = input_streams[ost->source_index];
1050 
1051  frame_rate = av_buffersink_get_frame_rate(filter);
1052  if (frame_rate.num > 0 && frame_rate.den > 0)
1053  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1054 
1055  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1056  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1057 
1058  if (!ost->filters_script &&
1059  !ost->filters &&
1060  next_picture &&
1061  ist &&
1062  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1063  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1064  }
1065 
1066  if (!next_picture) {
1067  //end, flushing
1068  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1069  ost->last_nb0_frames[1],
1070  ost->last_nb0_frames[2]);
1071  } else {
1072  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1073  delta = delta0 + duration;
1074 
1075  /* by default, we output a single frame */
1076  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1077  nb_frames = 1;
1078 
1079  format_video_sync = video_sync_method;
1080  if (format_video_sync == VSYNC_AUTO) {
1081  if(!strcmp(of->ctx->oformat->name, "avi")) {
1082  format_video_sync = VSYNC_VFR;
1083  } else
1084  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1085  if ( ist
1086  && format_video_sync == VSYNC_CFR
1087  && input_files[ist->file_index]->ctx->nb_streams == 1
1088  && input_files[ist->file_index]->input_ts_offset == 0) {
1089  format_video_sync = VSYNC_VSCFR;
1090  }
1091  if (format_video_sync == VSYNC_CFR && copy_ts) {
1092  format_video_sync = VSYNC_VSCFR;
1093  }
1094  }
1095  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1096 
1097  if (delta0 < 0 &&
1098  delta > 0 &&
1099  format_video_sync != VSYNC_PASSTHROUGH &&
1100  format_video_sync != VSYNC_DROP) {
1101  if (delta0 < -0.6) {
1102  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1103  } else
1104  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1105  sync_ipts = ost->sync_opts;
1106  duration += delta0;
1107  delta0 = 0;
1108  }
1109 
1110  switch (format_video_sync) {
1111  case VSYNC_VSCFR:
1112  if (ost->frame_number == 0 && delta0 >= 0.5) {
1113  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1114  delta = duration;
1115  delta0 = 0;
1116  ost->sync_opts = lrint(sync_ipts);
1117  }
1118  case VSYNC_CFR:
1119  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1120  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1121  nb_frames = 0;
1122  } else if (delta < -1.1)
1123  nb_frames = 0;
1124  else if (delta > 1.1) {
1125  nb_frames = lrintf(delta);
1126  if (delta0 > 1.1)
1127  nb0_frames = lrintf(delta0 - 0.6);
1128  }
1129  break;
1130  case VSYNC_VFR:
1131  if (delta <= -0.6)
1132  nb_frames = 0;
1133  else if (delta > 0.6)
1134  ost->sync_opts = lrint(sync_ipts);
1135  break;
1136  case VSYNC_DROP:
1137  case VSYNC_PASSTHROUGH:
1138  ost->sync_opts = lrint(sync_ipts);
1139  break;
1140  default:
1141  av_assert0(0);
1142  }
1143  }
1144 
1145  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1146  nb0_frames = FFMIN(nb0_frames, nb_frames);
1147 
1148  memmove(ost->last_nb0_frames + 1,
1149  ost->last_nb0_frames,
1150  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1151  ost->last_nb0_frames[0] = nb0_frames;
1152 
1153  if (nb0_frames == 0 && ost->last_dropped) {
1154  nb_frames_drop++;
1156  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1157  ost->frame_number, ost->st->index, ost->last_frame->pts);
1158  }
1159  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1160  if (nb_frames > dts_error_threshold * 30) {
1161  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1162  nb_frames_drop++;
1163  return;
1164  }
1165  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1166  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1167  if (nb_frames_dup > dup_warning) {
1168  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1169  dup_warning *= 10;
1170  }
1171  }
1172  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1173 
1174  /* duplicates frame if needed */
1175  for (i = 0; i < nb_frames; i++) {
1176  AVFrame *in_picture;
1177  av_init_packet(&pkt);
1178  pkt.data = NULL;
1179  pkt.size = 0;
1180 
1181  if (i < nb0_frames && ost->last_frame) {
1182  in_picture = ost->last_frame;
1183  } else
1184  in_picture = next_picture;
1185 
1186  if (!in_picture)
1187  return;
1188 
1189  in_picture->pts = ost->sync_opts;
1190 
1191 #if 1
1192  if (!check_recording_time(ost))
1193 #else
1194  if (ost->frame_number >= ost->max_frames)
1195 #endif
1196  return;
1197 
1198 #if FF_API_LAVF_FMT_RAWPICTURE
1199  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1200  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1201  /* raw pictures are written as AVPicture structure to
1202  avoid any copies. We support temporarily the older
1203  method. */
1204  if (in_picture->interlaced_frame)
1205  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206  else
1207  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208  pkt.data = (uint8_t *)in_picture;
1209  pkt.size = sizeof(AVPicture);
1210  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1211  pkt.flags |= AV_PKT_FLAG_KEY;
1212 
1213  output_packet(of, &pkt, ost);
1214  } else
1215 #endif
1216  {
1217  int forced_keyframe = 0;
1218  double pts_time;
1219 
1221  ost->top_field_first >= 0)
1222  in_picture->top_field_first = !!ost->top_field_first;
1223 
1224  if (in_picture->interlaced_frame) {
1225  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227  else
1228  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229  } else
1230  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 
1232  in_picture->quality = enc->global_quality;
1233  in_picture->pict_type = 0;
1234 
1235  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1236  in_picture->pts * av_q2d(enc->time_base) : NAN;
1237  if (ost->forced_kf_index < ost->forced_kf_count &&
1238  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1239  ost->forced_kf_index++;
1240  forced_keyframe = 1;
1241  } else if (ost->forced_keyframes_pexpr) {
1242  double res;
1243  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1246  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1252  res);
1253  if (res) {
1254  forced_keyframe = 1;
1260  }
1261 
1263  } else if ( ost->forced_keyframes
1264  && !strncmp(ost->forced_keyframes, "source", 6)
1265  && in_picture->key_frame==1) {
1266  forced_keyframe = 1;
1267  }
1268 
1269  if (forced_keyframe) {
1270  in_picture->pict_type = AV_PICTURE_TYPE_I;
1271  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1272  }
1273 
1275  if (debug_ts) {
1276  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1277  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1278  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1279  enc->time_base.num, enc->time_base.den);
1280  }
1281 
1282  ost->frames_encoded++;
1283 
1284  ret = avcodec_send_frame(enc, in_picture);
1285  if (ret < 0)
1286  goto error;
1287 
1288  while (1) {
1289  ret = avcodec_receive_packet(enc, &pkt);
1290  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1291  if (ret == AVERROR(EAGAIN))
1292  break;
1293  if (ret < 0)
1294  goto error;
1295 
1296  if (debug_ts) {
1297  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1298  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1299  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1300  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1301  }
1302 
1303  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1304  pkt.pts = ost->sync_opts;
1305 
1306  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1307 
1308  if (debug_ts) {
1309  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1310  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1311  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1312  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1313  }
1314 
1315  frame_size = pkt.size;
1316  output_packet(of, &pkt, ost);
1317 
1318  /* if two pass, output log */
1319  if (ost->logfile && enc->stats_out) {
1320  fprintf(ost->logfile, "%s", enc->stats_out);
1321  }
1322  }
1323  }
1324  ost->sync_opts++;
1325  /*
1326  * For video, number of frames in == number of packets out.
1327  * But there may be reordering, so we can't throw away frames on encoder
1328  * flush, we need to limit them here, before they go into encoder.
1329  */
1330  ost->frame_number++;
1331 
1332  if (vstats_filename && frame_size)
1333  do_video_stats(ost, frame_size);
1334  }
1335 
1336  if (!ost->last_frame)
1337  ost->last_frame = av_frame_alloc();
1338  av_frame_unref(ost->last_frame);
1339  if (next_picture && ost->last_frame)
1340  av_frame_ref(ost->last_frame, next_picture);
1341  else
1342  av_frame_free(&ost->last_frame);
1343 
1344  return;
1345 error:
1346  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1347  exit_program(1);
1348 }
1349 
1350 static double psnr(double d)
1351 {
1352  return -10.0 * log10(d);
1353 }
1354 
/*
 * Append one line of per-frame video statistics to the -vstats log file.
 * NOTE(review): the function signature line was lost in this extraction view;
 * presumably do_video_stats(OutputStream *ost, int frame_size) — confirm
 * against the forward declaration below.
 */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* version 2 of the log format additionally records output file and
         * stream indices at the start of each line */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* PSNR is only meaningful when the encoder collected error stats */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01) /* clamp to avoid division by ~0 in avg_bitrate */
            ti1 = 0.01;

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1397 
1398 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1399 
/*
 * Mark an output stream as done. With -shortest, finishing any one stream of
 * an output file finishes every stream of that file.
 * NOTE(review): the signature line (finish_output_stream(OutputStream *ost))
 * and the statement finishing ost itself appear to be missing from this
 * extraction view — confirm against upstream.
 */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    if (of->shortest) {
        /* stop both encoding and muxing for all sibling streams */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1412 
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @param flush if nonzero, push a NULL frame to video outputs on EOF so the
 *              last frame can be emitted
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of the buffersink context variable
         * ("filter") was lost in this extraction view */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams without a fully configured filtergraph */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* lazily initialize the output stream on the first reaped frame */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        /* drain every frame currently buffered in the sink */
        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NOTE(review): the flags argument / closing paren of this call
             * was lost in this extraction view */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log() opening line is missing here */
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NOTE(review): a guard line (video-type check) is
                     * missing here in this view */
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator to keep extra fractional
                 * precision in float_pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    /* NOTE(review): the av_log() opening line is missing here */
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1524 
/*
 * Print the end-of-run summary: per-type output byte totals, muxing overhead,
 * and (at verbose level) per-stream demux/mux packet statistics.
 *
 * @param total_size total number of bytes written to the first output file
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate output byte counts per media type */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        /* NOTE(review): the condition guarding this assignment (a check of
         * the encoder's two-pass flags) is missing from this extraction view */
            pass1_used = 0;
    }

    /* muxing overhead as a percentage of the payload size */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* warn when nothing at all was produced; the hint differs depending on
     * whether a two-pass first pass was (apparently) in use */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1641 
/*
 * Print (and optionally stream via -progress) the periodic status line:
 * frame count, fps, q, size, time, bitrate, dup/drop counts and speed.
 * Called roughly every 500ms and once more with is_last_report set.
 *
 * @param is_last_report nonzero on the final report (also triggers
 *                       print_final_stats())
 * @param timer_start    wallclock start of the transcode, in microseconds
 * @param cur_time       current wallclock time, in microseconds
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* throttle intermediate reports to one per 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* only the first video stream drives the frame/fps display; further
         * video streams just append their q value */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* one hex digit per qp bucket, log2-compressed counts */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j) /* chroma planes have a quarter of the samples */
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): a guard line (likely an AV_NOPTS_VALUE check on
         * av_stream_get_end_pts()) is missing from this extraction view */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    /* NOTE(review): a condition line and the snprintf argument line
     * (nb_frames_dup, nb_frames_drop) are missing from this view */
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

        fflush(stderr);
    }

    /* machine-readable key=value report for -progress consumers */
    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): the av_log() opening line is missing here */
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1833 
/*
 * Drain all encoders at end of input: send a NULL (flush) frame to every
 * encoder that was used, receive and mux the remaining delayed packets.
 * Streams that never produced data get their filtergraph configured with
 * fake parameters so they can still be finalized.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log() opening line is missing here */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* no input ever arrived: fabricate filter input parameters
                 * from the demuxer-provided codec parameters */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0) {
                        AVCodecParameters *par = ifilter->ist->st->codecpar;
                        // We never got any input. Set a fake format, which will
                        // come from libavformat.
                        ifilter->format                 = par->format;
                        ifilter->sample_rate            = par->sample_rate;
                        ifilter->channels               = par->channels;
                        ifilter->channel_layout         = par->channel_layout;
                        ifilter->width                  = par->width;
                        ifilter->height                 = par->height;
                        ifilter->sample_aspect_ratio    = par->sample_aspect_ratio;
                    }
                }

                /* NOTE(review): the condition guarding this continue (likely
                 * an ifilter_has_all_input_formats() check) is missing from
                 * this extraction view */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* PCM-style audio encoders (frame_size <= 1) have no delay to flush */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

        /* NOTE(review): the condition guarding this continue (likely a
         * media-type check) is missing from this extraction view */
            continue;

        /* enter draining mode */
        avcodec_send_frame(enc, NULL);

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): a line (likely update_benchmark(NULL)) is
             * missing here in this view */
            ret = avcodec_receive_packet(enc, &pkt);
            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost);
            /* NOTE(review): the condition guarding this call (video type +
             * vstats_filename check) is missing from this extraction view */
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1954 
/*
 * Check whether a packet from ist should be written into ost at this time.
 * Returns 1 when ost stream-copies from ist, is not finished, and the input
 * timestamp has reached the output file's start time; 0 otherwise.
 * NOTE(review): the signature line (check_output_constraints(InputStream *ist,
 * OutputStream *ost) in upstream FFmpeg) was lost in this extraction view.
 */
{
    OutputFile *of = output_files[ost->file_index];
    /* global index of ist across all input files */
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* honor -ss on the output side: drop packets before the start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1974 
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start-time / recording-time windowing, rescales timestamps into
 * the muxer timebase, optionally runs the parser to fix up bitstream
 * headers, and hands the packet to the muxer.
 *
 * @param ist source input stream
 * @param ost destination (stream-copy) output stream
 * @param pkt demuxed packet in ist->st->time_base; not modified
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* don't start a copied stream on a non-keyframe */
    /* NOTE(review): the second half of this condition (presumably the
     * !ost->copy_initial_nonkeyframes check) is missing from this view */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    /* drop packets that precede the (possibly -copyts adjusted) start time */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* enforce -t on the output file */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* enforce -t on the input file */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration/initialization of `duration` and one
         * argument line of av_rescale_delta() are missing from this view */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
       ) {
        /* NOTE(review): the final argument line of av_parser_change() is
         * missing from this view */
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* parser allocated a new buffer; give opkt ownership of it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    output_packet(of, &opkt, ost);
}
2088 
/*
 * If the decoder did not report a channel layout, try to pick a sensible
 * default for the channel count and warn about the guess. Returns 1 when a
 * layout is known/guessed, 0 when it cannot be determined.
 * NOTE(review): the signature line (guess_input_channel_layout(InputStream
 * *ist) in upstream FFmpeg) was lost in this extraction view.
 */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* refuse to guess for very high channel counts (-guess_layout_max) */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): the line assigning the guessed default layout is
         * missing from this view */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2108 
/*
 * Book-keep decode successes/failures and honor -xerror: abort on a decode
 * error, or on a corrupt decoded frame, when exit_on_error is set.
 *
 * @param ist        stream the result belongs to (may be NULL)
 * @param got_output nonzero if the decoder produced a frame
 * @param ret        decoder return code (<0 on error)
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
        /* NOTE(review): the inner condition (a corrupt-frame check on the
         * decoded frame) is missing from this extraction view */
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
            exit_program(1);
        }
    }
}
2124 
// Filters can be configured only if the formats of all inputs are known.
// Returns 1 when every audio/video input of the graph has a known format,
// 0 otherwise (other media types are not required to be known).
// NOTE(review): the signature line (ifilter_has_all_input_formats(FilterGraph
// *fg) in upstream FFmpeg) was lost in this extraction view.
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2136 
/* Feed one decoded frame into an input filter. If the frame's parameters
 * differ from what the graph was configured with, the graph is reconfigured;
 * if the graph cannot be configured yet (some inputs still have unknown
 * formats), the frame is cloned and buffered in a FIFO instead.
 * NOTE(review): missing from this scrape: the signature (original line 2137,
 * presumably ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)) and
 * original line 2201 — the call that actually pushes the frame into the
 * buffersrc, whose return value is tested below. Verify against upstream. */
2138 {
2139  FilterGraph *fg = ifilter->graph;
2140  int need_reinit, ret, i;
2141 
2142  /* determine if the parameters for this input changed */
2143  need_reinit = ifilter->format != frame->format;
/* a change of hardware-frames context also forces a reinit */
2144  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2145  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2146  need_reinit = 1;
2147 
2148  switch (ifilter->ist->st->codecpar->codec_type) {
2149  case AVMEDIA_TYPE_AUDIO:
2150  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2151  ifilter->channels != frame->channels ||
2152  ifilter->channel_layout != frame->channel_layout;
2153  break;
2154  case AVMEDIA_TYPE_VIDEO:
2155  need_reinit |= ifilter->width != frame->width ||
2156  ifilter->height != frame->height;
2157  break;
2158  }
2159 
2160  if (need_reinit) {
2161  ret = ifilter_parameters_from_frame(ifilter, frame);
2162  if (ret < 0)
2163  return ret;
2164  }
2165 
2166  /* (re)init the graph if possible, otherwise buffer the frame and return */
2167  if (need_reinit || !fg->graph) {
2168  for (i = 0; i < fg->nb_inputs; i++) {
2169  if (!ifilter_has_all_input_formats(fg)) {
/* clone so the caller's frame can be unreferenced; the clone is consumed
 * later when the graph is finally configured */
2170  AVFrame *tmp = av_frame_clone(frame);
2171  if (!tmp)
2172  return AVERROR(ENOMEM);
2173  av_frame_unref(frame);
2174 
/* grow the FIFO geometrically when it is full */
2175  if (!av_fifo_space(ifilter->frame_queue)) {
2176  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2177  if (ret < 0)
2178  return ret;
2179  }
2180  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2181  return 0;
2182  }
2183  }
2184 
/* drain frames already in the old graph before reconfiguring it */
2185  ret = reap_filters(1);
2186  if (ret < 0 && ret != AVERROR_EOF) {
2187  char errbuf[128];
2188  av_strerror(ret, errbuf, sizeof(errbuf));
2189 
2190  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2191  return ret;
2192  }
2193 
2194  ret = configure_filtergraph(fg);
2195  if (ret < 0) {
2196  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2197  return ret;
2198  }
2199  }
2200 
/* NOTE(review): original line 2201 (the buffersrc push that sets ret) is
 * missing from this scrape */
2202  if (ret < 0) {
2203  av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
2204  return ret;
2205  }
2206 
2207  return 0;
2208 }
2209 
/* Mark an input filter as finished. If the filtergraph was configured, a
 * NULL frame is sent to its buffersrc to signal EOF; otherwise, once every
 * input of the never-configured graph has hit EOF, all of the graph's output
 * streams are marked finished. Returns 0 or a negative AVERROR.
 * NOTE(review): original line 2217 — the call sending the EOF/NULL frame to
 * ifilter->filter, which sets ret — is missing from this scrape. */
2210 static int ifilter_send_eof(InputFilter *ifilter)
2211 {
2212  int i, j, ret;
2213 
2214  ifilter->eof = 1;
2215 
2216  if (ifilter->filter) {
2218  if (ret < 0)
2219  return ret;
2220  } else {
2221  // the filtergraph was never configured
2222  FilterGraph *fg = ifilter->graph;
/* check whether some other input is still live */
2223  for (i = 0; i < fg->nb_inputs; i++)
2224  if (!fg->inputs[i]->eof)
2225  break;
2226  if (i == fg->nb_inputs) {
2227  // All the input streams have finished without the filtergraph
2228  // ever being configured.
2229  // Mark the output streams as finished.
2230  for (j = 0; j < fg->nb_outputs; j++)
2231  finish_output_stream(fg->outputs[j]->ost);
2232  }
2233  }
2234 
2235  return 0;
2236 }
2237 
2238 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2239 // There is the following difference: if you got a frame, you must call
2240 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2241 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2242 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2243 {
2244  int ret;
2245 
2246  *got_frame = 0;
2247 
2248  if (pkt) {
2249  ret = avcodec_send_packet(avctx, pkt);
2250  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2251  // decoded frames with avcodec_receive_frame() until done.
2252  if (ret < 0 && ret != AVERROR_EOF)
2253  return ret;
2254  }
2255 
2256  ret = avcodec_receive_frame(avctx, frame);
2257  if (ret < 0 && ret != AVERROR(EAGAIN))
2258  return ret;
2259  if (ret >= 0)
2260  *got_frame = 1;
2261 
2262  return 0;
2263 }
2264 
/* Distribute one decoded frame to every filter fed by this input stream.
 * All but the last filter receive a new reference (ist->filter_frame); the
 * last one consumes decoded_frame itself. Returns 0 or a negative AVERROR.
 * NOTE(review): original line 2283 — presumably the av_log(NULL, AV_LOG_ERROR,
 * ...) call whose continuation appears below — is missing from this scrape. */
2265 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2266 {
2267  int i, ret;
2268  AVFrame *f;
2269 
2270  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2271  for (i = 0; i < ist->nb_filters; i++) {
2272  if (i < ist->nb_filters - 1) {
2273  f = ist->filter_frame;
2274  ret = av_frame_ref(f, decoded_frame);
2275  if (ret < 0)
2276  break;
2277  } else
2278  f = decoded_frame;
2279  ret = ifilter_send_frame(ist->filters[i], f);
/* a finished filter input is not an error for the caller */
2280  if (ret == AVERROR_EOF)
2281  ret = 0; /* ignore */
2282  if (ret < 0) {
2284  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2285  break;
2286  }
2287  }
2288  return ret;
2289 }
2290 
/* Decode one audio packet (or drain when pkt == NULL), fix up the frame's
 * timestamp, and forward the frame to the input stream's filters.
 * *got_output is set when a frame was produced; *decode_failed is set when
 * the decoder itself errored. Returns the decode error, or the filter-send
 * error if that failed.
 * NOTE(review): original line 2305 — presumably update_benchmark(NULL) before
 * the decode call — is missing from this scrape. */
2291 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2292  int *decode_failed)
2293 {
2294  AVFrame *decoded_frame;
2295  AVCodecContext *avctx = ist->dec_ctx;
2296  int ret, err = 0;
2297  AVRational decoded_frame_tb;
2298 
/* lazily allocate the per-stream reusable frames */
2299  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2300  return AVERROR(ENOMEM);
2301  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2302  return AVERROR(ENOMEM);
2303  decoded_frame = ist->decoded_frame;
2304 
2306  ret = decode(avctx, decoded_frame, got_output, pkt);
2307  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2308  if (ret < 0)
2309  *decode_failed = 1;
2310 
/* a non-positive sample rate would divide by zero below */
2311  if (ret >= 0 && avctx->sample_rate <= 0) {
2312  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2313  ret = AVERROR_INVALIDDATA;
2314  }
2315 
2316  if (ret != AVERROR_EOF)
2317  check_decode_result(ist, got_output, ret);
2318 
2319  if (!*got_output || ret < 0)
2320  return ret;
2321 
2322  ist->samples_decoded += decoded_frame->nb_samples;
2323  ist->frames_decoded++;
2324 
2325 #if 1
2326  /* increment next_dts to use for the case where the input stream does not
2327  have timestamps or there are multiple frames in the packet */
2328  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2329  avctx->sample_rate;
2330  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2331  avctx->sample_rate;
2332 #endif
2333 
/* pick a pts and the time base it is expressed in, in order of preference:
 * frame pts, packet pts, interpolated stream dts */
2334  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2335  decoded_frame_tb = ist->st->time_base;
2336  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2337  decoded_frame->pts = pkt->pts;
2338  decoded_frame_tb = ist->st->time_base;
2339  }else {
2340  decoded_frame->pts = ist->dts;
2341  decoded_frame_tb = AV_TIME_BASE_Q;
2342  }
2343  if (decoded_frame->pts != AV_NOPTS_VALUE)
2344  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2345  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2346  (AVRational){1, avctx->sample_rate});
2347  ist->nb_samples = decoded_frame->nb_samples;
2348  err = send_frame_to_filters(ist, decoded_frame);
2349 
2350  av_frame_unref(ist->filter_frame);
2351  av_frame_unref(decoded_frame);
2352  return err < 0 ? err : ret;
2353 }
2354 
/* Decode one video packet (or drain when pkt == NULL / eof), derive a best-
 * effort timestamp, and forward the frame to the input stream's filters.
 * *got_output is set when a frame was produced; *decode_failed is set when
 * the decoder itself errored.
 * NOTE(review): missing from this scrape: original line 2392 (presumably
 * update_benchmark(NULL)), line 2404 (presumably the av_log(NULL,
 * AV_LOG_WARNING, ...) head of the message below) and line 2482 (presumably
 * av_frame_unref(ist->filter_frame) before the final unref). */
2355 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2356  int *decode_failed)
2357 {
2358  AVFrame *decoded_frame;
2359  int i, ret = 0, err = 0;
2360  int64_t best_effort_timestamp;
2361  int64_t dts = AV_NOPTS_VALUE;
2362  AVPacket avpkt;
2363 
2364  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2365  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2366  // skip the packet.
2367  if (!eof && pkt && pkt->size == 0)
2368  return 0;
2369 
/* lazily allocate the per-stream reusable frames */
2370  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2371  return AVERROR(ENOMEM);
2372  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2373  return AVERROR(ENOMEM);
2374  decoded_frame = ist->decoded_frame;
2375  if (ist->dts != AV_NOPTS_VALUE)
2376  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2377  if (pkt) {
2378  avpkt = *pkt;
2379  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2380  }
2381 
2382  // The old code used to set dts on the drain packet, which does not work
2383  // with the new API anymore.
2384  if (eof) {
/* remember the dts so drained frames can still get timestamps below */
2385  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2386  if (!new)
2387  return AVERROR(ENOMEM);
2388  ist->dts_buffer = new;
2389  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2390  }
2391 
2393  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2394  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2395  if (ret < 0)
2396  *decode_failed = 1;
2397 
2398  // The following line may be required in some cases where there is no parser
2399  // or the parser does not has_b_frames correctly
2400  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2401  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2402  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2403  } else
2405  "video_delay is larger in decoder than demuxer %d > %d.\n"
2406  "If you want to help, upload a sample "
2407  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2408  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2409  ist->dec_ctx->has_b_frames,
2410  ist->st->codecpar->video_delay);
2411  }
2412 
2413  if (ret != AVERROR_EOF)
2414  check_decode_result(ist, got_output, ret);
2415 
/* debug aid only: report (but tolerate) frame/context parameter mismatches */
2416  if (*got_output && ret >= 0) {
2417  if (ist->dec_ctx->width != decoded_frame->width ||
2418  ist->dec_ctx->height != decoded_frame->height ||
2419  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2420  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2421  decoded_frame->width,
2422  decoded_frame->height,
2423  decoded_frame->format,
2424  ist->dec_ctx->width,
2425  ist->dec_ctx->height,
2426  ist->dec_ctx->pix_fmt);
2427  }
2428  }
2429 
2430  if (!*got_output || ret < 0)
2431  return ret;
2432 
2433  if(ist->top_field_first>=0)
2434  decoded_frame->top_field_first = ist->top_field_first;
2435 
2436  ist->frames_decoded++;
2437 
/* download the frame from hw memory when a retrieve callback is installed */
2438  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2439  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2440  if (err < 0)
2441  goto fail;
2442  }
2443  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2444 
2445  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2446 
/* with a forced input framerate, timestamps are simply sequential */
2447  if (ist->framerate.num)
2448  best_effort_timestamp = ist->cfr_next_pts++;
2449 
/* while draining, fall back to the dts values remembered above */
2450  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2451  best_effort_timestamp = ist->dts_buffer[0];
2452 
2453  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2454  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2455  ist->nb_dts_buffer--;
2456  }
2457 
2458  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2459  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2460 
2461  if (ts != AV_NOPTS_VALUE)
2462  ist->next_pts = ist->pts = ts;
2463  }
2464 
2465  if (debug_ts) {
2466  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2467  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2468  ist->st->index, av_ts2str(decoded_frame->pts),
2469  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2470  best_effort_timestamp,
2471  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2472  decoded_frame->key_frame, decoded_frame->pict_type,
2473  ist->st->time_base.num, ist->st->time_base.den);
2474  }
2475 
2476  if (ist->st->sample_aspect_ratio.num)
2477  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2478 
2479  err = send_frame_to_filters(ist, decoded_frame);
2480 
2481 fail:
2483  av_frame_unref(decoded_frame);
2484  return err < 0 ? err : ret;
2485 }
2486 
/* Decode one subtitle packet, optionally fix subtitle durations
 * (-fix_sub_duration), feed sub2video when in use, and send the subtitle to
 * every output stream encoding subtitles from this input.
 * NOTE(review): missing from this scrape: original lines 2512 and 2514 (the
 * argument line of the "Subtitle duration reduced" message and, presumably,
 * the assignment shortening prev_sub.subtitle.end_display_time) and line
 * 2535 (presumably the av_fifo_realloc2 call that sets ret below). */
2487 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2488  int *decode_failed)
2489 {
2490  AVSubtitle subtitle;
2491  int free_sub = 1;
2492  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2493  &subtitle, got_output, pkt);
2494 
2495  check_decode_result(NULL, got_output, ret);
2496 
2497  if (ret < 0 || !*got_output) {
2498  *decode_failed = 1;
/* an empty packet is the drain signal for sub2video */
2499  if (!pkt->size)
2500  sub2video_flush(ist);
2501  return ret;
2502  }
2503 
/* -fix_sub_duration: delay each subtitle by one and clamp its duration to
 * the gap before the next one */
2504  if (ist->fix_sub_duration) {
2505  int end = 1;
2506  if (ist->prev_sub.got_output) {
2507  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2508  1000, AV_TIME_BASE);
2509  if (end < ist->prev_sub.subtitle.end_display_time) {
2510  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2511  "Subtitle duration reduced from %d to %d%s\n",
2513  end <= 0 ? ", dropping it" : "");
2515  }
2516  }
/* swap current and previous: the previous subtitle is emitted now */
2517  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2518  FFSWAP(int, ret, ist->prev_sub.ret);
2519  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2520  if (end <= 0)
2521  goto out;
2522  }
2523 
2524  if (!*got_output)
2525  return ret;
2526 
2527  if (ist->sub2video.frame) {
2528  sub2video_update(ist, &subtitle);
2529  } else if (ist->nb_filters) {
/* sub2video not started yet: queue the subtitle (ownership moves to the
 * queue, hence free_sub = 0) */
2530  if (!ist->sub2video.sub_queue)
2531  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2532  if (!ist->sub2video.sub_queue)
2533  exit_program(1);
2534  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2536  if (ret < 0)
2537  exit_program(1);
2538  }
2539  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2540  free_sub = 0;
2541  }
2542 
2543  if (!subtitle.num_rects)
2544  goto out;
2545 
2546  ist->frames_decoded++;
2547 
2548  for (i = 0; i < nb_output_streams; i++) {
2549  OutputStream *ost = output_streams[i];
2550 
2551  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2552  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2553  continue;
2554 
2555  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2556  }
2557 
2558 out:
2559  if (free_sub)
2560  avsubtitle_free(&subtitle);
2561  return ret;
2562 }
2563 
/* Send EOF to every filter input attached to this input stream.
 * Returns 0, or the first negative AVERROR reported by ifilter_send_eof().
 * NOTE(review): the signature line (original 2564) is missing from this
 * scrape — presumably send_filter_eof(InputStream *ist). */
2565 {
2566  int i, ret;
2567  for (i = 0; i < ist->nb_filters; i++) {
2568  ret = ifilter_send_eof(ist->filters[i]);
2569  if (ret < 0)
2570  return ret;
2571  }
2572  return 0;
2573 }
2574 
2575 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet driver for one input stream: maintains the stream's
 * pts/dts bookkeeping, runs the decode loop for decoded streams (repeating
 * with pkt=NULL to drain multi-frame output), signals filter EOF after
 * flushing, advances timestamps for streamcopy, and hands the packet to
 * every matching streamcopy output. Returns 0 on EOF, 1 otherwise.
 * NOTE(review): missing from this scrape: original lines 2634 and 2637 (the
 * "ticks" computation and the divisor of the duration formula in the video
 * branch) and line 2728 (the matching divisor in the streamcopy branch). */
2576 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2577 {
2578  int ret = 0, i;
2579  int repeating = 0;
2580  int eof_reached = 0;
2581 
2582  AVPacket avpkt;
/* initialize the stream clock from the first packet seen */
2583  if (!ist->saw_first_ts) {
2584  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2585  ist->pts = 0;
2586  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2587  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2588  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2589  }
2590  ist->saw_first_ts = 1;
2591  }
2592 
2593  if (ist->next_dts == AV_NOPTS_VALUE)
2594  ist->next_dts = ist->dts;
2595  if (ist->next_pts == AV_NOPTS_VALUE)
2596  ist->next_pts = ist->pts;
2597 
2598  if (!pkt) {
2599  /* EOF handling */
2600  av_init_packet(&avpkt);
2601  avpkt.data = NULL;
2602  avpkt.size = 0;
2603  } else {
2604  avpkt = *pkt;
2605  }
2606 
/* resync the stream clock to the packet dts (in AV_TIME_BASE units) */
2607  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2608  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2609  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2610  ist->next_pts = ist->pts = ist->dts;
2611  }
2612 
2613  // while we have more to decode or while the decoder did output something on EOF
2614  while (ist->decoding_needed) {
2615  int duration = 0;
2616  int got_output = 0;
2617  int decode_failed = 0;
2618 
2619  ist->pts = ist->next_pts;
2620  ist->dts = ist->next_dts;
2621 
2622  switch (ist->dec_ctx->codec_type) {
2623  case AVMEDIA_TYPE_AUDIO:
2624  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2625  &decode_failed);
2626  break;
2627  case AVMEDIA_TYPE_VIDEO:
2628  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2629  &decode_failed);
/* estimate the frame duration: packet duration first, else framerate */
2630  if (!repeating || !pkt || got_output) {
2631  if (pkt && pkt->duration) {
2632  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2633  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2635  duration = ((int64_t)AV_TIME_BASE *
2636  ist->dec_ctx->framerate.den * ticks) /
2638  }
2639 
2640  if(ist->dts != AV_NOPTS_VALUE && duration) {
2641  ist->next_dts += duration;
2642  }else
2643  ist->next_dts = AV_NOPTS_VALUE;
2644  }
2645 
2646  if (got_output)
2647  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2648  break;
2649  case AVMEDIA_TYPE_SUBTITLE:
2650  if (repeating)
2651  break;
2652  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653  if (!pkt && ret >= 0)
2654  ret = AVERROR_EOF;
2655  break;
2656  default:
2657  return -1;
2658  }
2659 
2660  if (ret == AVERROR_EOF) {
2661  eof_reached = 1;
2662  break;
2663  }
2664 
2665  if (ret < 0) {
2666  if (decode_failed) {
2667  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668  ist->file_index, ist->st->index, av_err2str(ret));
2669  } else {
2670  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672  }
2673  if (!decode_failed || exit_on_error)
2674  exit_program(1);
2675  break;
2676  }
2677 
2678  if (got_output)
2679  ist->got_output = 1;
2680 
2681  if (!got_output)
2682  break;
2683 
2684  // During draining, we might get multiple output frames in this loop.
2685  // ffmpeg.c does not drain the filter chain on configuration changes,
2686  // which means if we send multiple frames at once to the filters, and
2687  // one of those frames changes configuration, the buffered frames will
2688  // be lost. This can upset certain FATE tests.
2689  // Decode only 1 frame per call on EOF to appease these FATE tests.
2690  // The ideal solution would be to rewrite decoding to use the new
2691  // decoding API in a better way.
2692  if (!pkt)
2693  break;
2694 
2695  repeating = 1;
2696  }
2697 
2698  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699  /* except when looping we need to flush but not to send an EOF */
2700  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701  int ret = send_filter_eof(ist);
2702  if (ret < 0) {
2703  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2704  exit_program(1);
2705  }
2706  }
2707 
2708  /* handle stream copy */
2709  if (!ist->decoding_needed) {
2710  ist->dts = ist->next_dts;
2711  switch (ist->dec_ctx->codec_type) {
2712  case AVMEDIA_TYPE_AUDIO:
2713  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2714  ist->dec_ctx->sample_rate;
2715  break;
2716  case AVMEDIA_TYPE_VIDEO:
2717  if (ist->framerate.num) {
2718  // TODO: Remove work-around for c99-to-c89 issue 7
2719  AVRational time_base_q = AV_TIME_BASE_Q;
2720  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2721  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2722  } else if (pkt->duration) {
2723  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2724  } else if(ist->dec_ctx->framerate.num != 0) {
2725  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2726  ist->next_dts += ((int64_t)AV_TIME_BASE *
2727  ist->dec_ctx->framerate.den * ticks) /
2729  }
2730  break;
2731  }
2732  ist->pts = ist->dts;
2733  ist->next_pts = ist->next_dts;
2734  }
/* hand the packet to every streamcopy output fed by this input */
2735  for (i = 0; pkt && i < nb_output_streams; i++) {
2736  OutputStream *ost = output_streams[i];
2737 
2738  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2739  continue;
2740 
2741  do_streamcopy(ist, ost, pkt);
2742  }
2743 
2744  return !eof_reached;
2745 }
2746 
/* Build an SDP description covering every RTP output and either print it to
 * stdout or write it to -sdp_file. Does nothing until all output files have
 * written their headers.
 * NOTE(review): original line 2784 is missing from this scrape — presumably
 * av_freep(&sdp_filename) after a successful write. Verify upstream. */
2747 static void print_sdp(void)
2748 {
2749  char sdp[16384];
2750  int i;
2751  int j;
2752  AVIOContext *sdp_pb;
2753  AVFormatContext **avc;
2754 
/* wait until every muxer header is written before emitting the SDP */
2755  for (i = 0; i < nb_output_files; i++) {
2756  if (!output_files[i]->header_written)
2757  return;
2758  }
2759 
2760  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2761  if (!avc)
2762  exit_program(1);
/* collect only the RTP outputs; j counts them */
2763  for (i = 0, j = 0; i < nb_output_files; i++) {
2764  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2765  avc[j] = output_files[i]->ctx;
2766  j++;
2767  }
2768  }
2769 
2770  if (!j)
2771  goto fail;
2772 
2773  av_sdp_create(avc, j, sdp, sizeof(sdp));
2774 
2775  if (!sdp_filename) {
2776  printf("SDP:\n%s\n", sdp);
2777  fflush(stdout);
2778  } else {
2779  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2780  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2781  } else {
2782  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2783  avio_closep(&sdp_pb);
2785  }
2786  }
2787 
2788 fail:
2789  av_freep(&avc);
2790 }
2791 
/* Look up the hwaccel table entry matching a pixel format; NULL if none.
 * NOTE(review): the signature line (original 2792) is missing from this
 * scrape — presumably get_hwaccel(enum AVPixelFormat pix_fmt). */
2793 {
2794  int i;
2795  for (i = 0; hwaccels[i].name; i++)
2796  if (hwaccels[i].pix_fmt == pix_fmt)
2797  return &hwaccels[i];
2798  return NULL;
2799 }
2800 
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats, initializing the first usable hwaccel (subject to the stream's
 * -hwaccel selection); fall back to the first software format otherwise.
 * NOTE(review): missing from this scrape: the signature (original 2801,
 * presumably get_format(AVCodecContext *s, const enum AVPixelFormat
 * *pix_fmts)), line 2808 (presumably fetching the AVPixFmtDescriptor used
 * below), line 2823 (presumably the av_log(NULL, AV_LOG_FATAL, ...) head of
 * the message below) and line 2833 (presumably the hw_frames_ctx ref). */
2802 {
2803  InputStream *ist = s->opaque;
2804  const enum AVPixelFormat *p;
2805  int ret;
2806 
2807  for (p = pix_fmts; *p != -1; p++) {
2809  const HWAccel *hwaccel;
2810 
/* candidates are ordered hw-first; the first sw format ends the search */
2811  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2812  break;
2813 
2814  hwaccel = get_hwaccel(*p);
2815  if (!hwaccel ||
2816  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2817  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2818  continue;
2819 
2820  ret = hwaccel->init(s);
2821  if (ret < 0) {
/* a hwaccel explicitly requested by the user must not fail silently */
2822  if (ist->hwaccel_id == hwaccel->id) {
2824  "%s hwaccel requested for input stream #%d:%d, "
2825  "but cannot be initialized.\n", hwaccel->name,
2826  ist->file_index, ist->st->index);
2827  return AV_PIX_FMT_NONE;
2828  }
2829  continue;
2830  }
2831 
2832  if (ist->hw_frames_ctx) {
2834  if (!s->hw_frames_ctx)
2835  return AV_PIX_FMT_NONE;
2836  }
2837 
2838  ist->active_hwaccel_id = hwaccel->id;
2839  ist->hwaccel_pix_fmt = *p;
2840  break;
2841  }
2842 
2843  return *p;
2844 }
2845 
2847 {
2848  InputStream *ist = s->opaque;
2849 
2850  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2851  return ist->hwaccel_get_buffer(s, frame, flags);
2852 
2853  return avcodec_default_get_buffer2(s, frame, flags);
2854 }
2855 
/* Open the decoder for one input stream (when decoding is needed), wiring up
 * the get_format/get_buffer callbacks and decoder options. On failure, fills
 * `error` (up to error_len bytes) and returns a negative AVERROR.
 * NOTE(review): missing from this scrape: original lines 2878 (inside the
 * DVB-subtitle special case), 2886 (presumably the av_dict_set enabling
 * subtitle/sample skip compensation mentioned in the comment above it) and
 * 2900 (presumably assert_avoptions(ist->decoder_opts)). Verify upstream. */
2856 static int init_input_stream(int ist_index, char *error, int error_len)
2857 {
2858  int ret;
2859  InputStream *ist = input_streams[ist_index];
2860 
2861  if (ist->decoding_needed) {
2862  AVCodec *codec = ist->dec;
2863  if (!codec) {
2864  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2865  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2866  return AVERROR(EINVAL);
2867  }
2868 
/* hook the hwaccel-aware callbacks into the decoder context */
2869  ist->dec_ctx->opaque = ist;
2870  ist->dec_ctx->get_format = get_format;
2871  ist->dec_ctx->get_buffer2 = get_buffer;
2872  ist->dec_ctx->thread_safe_callbacks = 1;
2873 
2874  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2875  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2876  (ist->decoding_needed & DECODING_FOR_OST)) {
2877  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2879  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2880  }
2881 
2882  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2883 
2884  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2885  * audio, and video decoders such as cuvid or mediacodec */
2887 
2888  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2889  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2890  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2891  if (ret == AVERROR_EXPERIMENTAL)
2892  abort_codec_experimental(codec, 0);
2893 
2894  snprintf(error, error_len,
2895  "Error while opening decoder for input stream "
2896  "#%d:%d : %s",
2897  ist->file_index, ist->st->index, av_err2str(ret));
2898  return ret;
2899  }
2901  }
2902 
2903  ist->next_pts = AV_NOPTS_VALUE;
2904  ist->next_dts = AV_NOPTS_VALUE;
2905 
2906  return 0;
2907 }
2908 
/* Return the input stream feeding an output stream, or NULL for outputs
 * with no direct source (e.g. pure filter/attachment outputs).
 * NOTE(review): the signature line (original 2909) is missing from this
 * scrape — presumably get_input_stream(OutputStream *ost). */
2910 {
2911  if (ost->source_index >= 0)
2912  return input_streams[ost->source_index];
2913  return NULL;
2914 }
2915 
/* qsort() comparator for int64_t values: ascending order, returning the
 * sign (-1/0/1) of the difference without risking overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
2920 
2921 /* open the muxer when all the streams are initialized */
/* Write the muxer header for an output file once every one of its streams
 * is initialized, print the SDP if requested, and flush packets that were
 * queued while the muxer was not yet open. Returns 0 or a negative AVERROR.
 * NOTE(review): original line 2936 — presumably the av_log(NULL,
 * AV_LOG_ERROR, ...) head of the message below — is missing from this
 * scrape. */
2922 static int check_init_output_file(OutputFile *of, int file_index)
2923 {
2924  int ret, i;
2925 
/* bail out until the last stream of this file is initialized */
2926  for (i = 0; i < of->ctx->nb_streams; i++) {
2927  OutputStream *ost = output_streams[of->ost_index + i];
2928  if (!ost->initialized)
2929  return 0;
2930  }
2931 
2932  of->ctx->interrupt_callback = int_cb;
2933 
2934  ret = avformat_write_header(of->ctx, &of->opts);
2935  if (ret < 0) {
2937  "Could not write header for output file #%d "
2938  "(incorrect codec parameters ?): %s\n",
2939  file_index, av_err2str(ret));
2940  return ret;
2941  }
2942  //assert_avoptions(of->opts);
2943  of->header_written = 1;
2944 
2945  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2946 
2947  if (sdp_filename || want_sdp)
2948  print_sdp();
2949 
2950  /* flush the muxing queues */
2951  for (i = 0; i < of->ctx->nb_streams; i++) {
2952  OutputStream *ost = output_streams[of->ost_index + i];
2953 
2954  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2955  if (!av_fifo_size(ost->muxing_queue))
2956  ost->mux_timebase = ost->st->time_base;
2957 
2958  while (av_fifo_size(ost->muxing_queue)) {
2959  AVPacket pkt;
2960  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2961  write_packet(of, &pkt, ost);
2962  }
2963  }
2964 
2965  return 0;
2966 }
2967 
/* Initialize an output stream's bitstream-filter chain: chain codec
 * parameters and time bases through each filter, then copy the final
 * parameters/time base back onto the output stream. Returns 0 or a negative
 * AVERROR. NOTE(review): the signature line (original 2968) is missing from
 * this scrape — presumably init_output_bsfs(OutputStream *ost). */
2969 {
2970  AVBSFContext *ctx;
2971  int i, ret;
2972 
2973  if (!ost->nb_bitstream_filters)
2974  return 0;
2975 
2976  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2977  ctx = ost->bsf_ctx[i];
2978 
/* each filter's input parameters come from the previous filter's output
 * (or the stream itself for the first filter) */
2979  ret = avcodec_parameters_copy(ctx->par_in,
2980  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2981  if (ret < 0)
2982  return ret;
2983 
2984  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2985 
2986  ret = av_bsf_init(ctx);
2987  if (ret < 0) {
2988  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2989  ost->bsf_ctx[i]->filter->name);
2990  return ret;
2991  }
2992  }
2993 
/* the last filter in the chain determines the muxed parameters */
2994  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2995  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2996  if (ret < 0)
2997  return ret;
2998 
2999  ost->st->time_base = ctx->time_base_out;
3000 
3001  return 0;
3002 }
3003 
/* Initialize an output stream that copies (rather than re-encodes) its input:
 * copy codec parameters, pick a codec tag valid for the target container,
 * carry over frame rate, aspect ratio, duration, disposition and stream side
 * data, and set up a parser for the copied stream.
 * NOTE(review): missing from this scrape: the signature (original 3004,
 * presumably init_output_stream_streamcopy(OutputStream *ost)), line 3020
 * (the av_log head of the message below), line 3024 (presumably copying
 * enc_ctx->rc_max_rate), line 3044 (presumably the avpriv_set_pts_info /
 * time-base setup whose result is tested below) and line 3050 (presumably
 * the ost->st->time_base assignment). Verify against upstream ffmpeg.c. */
3005 {
3006  OutputFile *of = output_files[ost->file_index];
3007  InputStream *ist = get_input_stream(ost);
3008  AVCodecParameters *par_dst = ost->st->codecpar;
3009  AVCodecParameters *par_src = ost->ref_par;
3010  AVRational sar;
3011  int i, ret;
3012  uint32_t codec_tag = par_dst->codec_tag;
3013 
/* streamcopy outputs must have a source stream and no filtergraph */
3014  av_assert0(ist && !ost->filter);
3015 
3016  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3017  if (ret >= 0)
3018  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3019  if (ret < 0) {
3021  "Error setting up codec context options.\n");
3022  return ret;
3023  }
3025 
/* keep the source codec tag only if the target container accepts it */
3026  if (!codec_tag) {
3027  unsigned int codec_tag_tmp;
3028  if (!of->ctx->oformat->codec_tag ||
3029  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3030  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3031  codec_tag = par_src->codec_tag;
3032  }
3033 
3034  ret = avcodec_parameters_copy(par_dst, par_src);
3035  if (ret < 0)
3036  return ret;
3037 
3038  par_dst->codec_tag = codec_tag;
3039 
3040  if (!ost->frame_rate.num)
3041  ost->frame_rate = ist->framerate;
3042  ost->st->avg_frame_rate = ost->frame_rate;
3043 
3045  if (ret < 0)
3046  return ret;
3047 
3048  // copy timebase while removing common factors
3049  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3051 
3052  // copy estimated duration as a hint to the muxer
3053  if (ost->st->duration <= 0 && ist->st->duration > 0)
3054  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3055 
3056  // copy disposition
3057  ost->st->disposition = ist->st->disposition;
3058 
/* deep-copy the source stream's packet side data, skipping display-matrix
 * entries when rotation was overridden on the command line */
3059  if (ist->st->nb_side_data) {
3060  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3061  sizeof(*ist->st->side_data));
3062  if (!ost->st->side_data)
3063  return AVERROR(ENOMEM);
3064 
3065  ost->st->nb_side_data = 0;
3066  for (i = 0; i < ist->st->nb_side_data; i++) {
3067  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3068  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3069 
3070  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
3071  continue;
3072 
3073  sd_dst->data = av_malloc(sd_src->size);
3074  if (!sd_dst->data)
3075  return AVERROR(ENOMEM);
3076  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3077  sd_dst->size = sd_src->size;
3078  sd_dst->type = sd_src->type;
3079  ost->st->nb_side_data++;
3080  }
3081  }
3082 
3083  ost->parser = av_parser_init(par_dst->codec_id);
3084  ost->parser_avctx = avcodec_alloc_context3(NULL);
3085  if (!ost->parser_avctx)
3086  return AVERROR(ENOMEM);
3087 
3088  switch (par_dst->codec_type) {
3089  case AVMEDIA_TYPE_AUDIO:
3090  if (audio_volume != 256) {
3091  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3092  exit_program(1);
3093  }
/* these block_align values are per-frame artifacts that must not be muxed */
3094  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3095  par_dst->block_align= 0;
3096  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3097  par_dst->block_align= 0;
3098  break;
3099  case AVMEDIA_TYPE_VIDEO:
3100  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3101  sar =
3102  av_mul_q(ost->frame_aspect_ratio,
3103  (AVRational){ par_dst->height, par_dst->width });
3104  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3105  "with stream copy may produce invalid files\n");
3106  }
3107  else if (ist->st->sample_aspect_ratio.num)
3108  sar = ist->st->sample_aspect_ratio;
3109  else
3110  sar = par_src->sample_aspect_ratio;
3111  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3112  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3113  ost->st->r_frame_rate = ist->st->r_frame_rate;
3114  break;
3115  }
3116 
3117  ost->mux_timebase = ist->st->time_base;
3118 
3119  return 0;
3120 }
3121 
/* Tag the output stream's metadata with an "encoder" string (libavcodec
 * ident + encoder name), unless one is already set. A generic "Lavc" prefix
 * is used when bitexact output was requested via fflags or codec flags.
 * NOTE(review): missing from this scrape: the signature (original 3122,
 * presumably set_encoder_id(OutputFile *of, OutputStream *ost)) and line
 * 3160 — presumably the AV_DICT_DONT_STRDUP_VAL flag argument terminating
 * the av_dict_set() call below. Verify against upstream ffmpeg.c. */
3123 {
3124  AVDictionaryEntry *e;
3125 
3126  uint8_t *encoder_string;
3127  int encoder_string_len;
3128  int format_flags = 0;
3129  int codec_flags = 0;
3130 
/* a user-supplied encoder tag always wins */
3131  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3132  return;
3133 
/* detect bitexact mode from the muxer's fflags ... */
3134  e = av_dict_get(of->opts, "fflags", NULL, 0);
3135  if (e) {
3136  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3137  if (!o)
3138  return;
3139  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3140  }
/* ... and from the encoder's flags */
3141  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3142  if (e) {
3143  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3144  if (!o)
3145  return;
3146  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3147  }
3148 
3149  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3150  encoder_string = av_mallocz(encoder_string_len);
3151  if (!encoder_string)
3152  exit_program(1);
3153 
3154  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3155  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3156  else
3157  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3158  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3159  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3161 }
3162 
3163 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3164  AVCodecContext *avctx)
3165 {
3166  char *p;
3167  int n = 1, i, size, index = 0;
3168  int64_t t, *pts;
3169 
3170  for (p = kf; *p; p++)
3171  if (*p == ',')
3172  n++;
3173  size = n;
3174  pts = av_malloc_array(size, sizeof(*pts));
3175  if (!pts) {
3176  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3177  exit_program(1);
3178  }
3179 
3180  p = kf;
3181  for (i = 0; i < n; i++) {
3182  char *next = strchr(p, ',');
3183 
3184  if (next)
3185  *next++ = 0;
3186 
3187  if (!memcmp(p, "chapters", 8)) {
3188 
3189  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3190  int j;
3191 
3192  if (avf->nb_chapters > INT_MAX - size ||
3193  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3194  sizeof(*pts)))) {
3196  "Could not allocate forced key frames array.\n");
3197  exit_program(1);
3198  }
3199  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3200  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3201 
3202  for (j = 0; j < avf->nb_chapters; j++) {
3203  AVChapter *c = avf->chapters[j];
3204  av_assert1(index < size);
3205  pts[index++] = av_rescale_q(c->start, c->time_base,
3206  avctx->time_base) + t;
3207  }
3208 
3209  } else {
3210 
3211  t = parse_time_or_die("force_key_frames", p, 1);
3212  av_assert1(index < size);
3213  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3214 
3215  }
3216 
3217  p = next;
3218  }
3219 
3220  av_assert0(index == size);
3221  qsort(pts, size, sizeof(*pts), compare_int64);
3222  ost->forced_kf_count = size;
3223  ost->forced_kf_pts = pts;
3224 }
3225 
3227 {
3228  InputStream *ist = get_input_stream(ost);
3229  AVCodecContext *enc_ctx = ost->enc_ctx;
3231  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3232  int j, ret;
3233 
3234  set_encoder_id(output_files[ost->file_index], ost);
3235 
3236  if (ist) {
3237  ost->st->disposition = ist->st->disposition;
3238 
3239  dec_ctx = ist->dec_ctx;
3240 
3241  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3242  } else {
3243  for (j = 0; j < oc->nb_streams; j++) {
3244  AVStream *st = oc->streams[j];
3245  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3246  break;
3247  }
3248  if (j == oc->nb_streams)
3249  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3252  }
3253 
3254  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3255  if (!ost->frame_rate.num)
3257  if (ist && !ost->frame_rate.num)
3258  ost->frame_rate = ist->framerate;
3259  if (ist && !ost->frame_rate.num)
3260  ost->frame_rate = ist->st->r_frame_rate;
3261  if (ist && !ost->frame_rate.num) {
3262  ost->frame_rate = (AVRational){25, 1};
3264  "No information "
3265  "about the input framerate is available. Falling "
3266  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3267  "if you want a different framerate.\n",
3268  ost->file_index, ost->index);
3269  }
3270 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3271  if (ost->enc->supported_framerates && !ost->force_fps) {
3272  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3273  ost->frame_rate = ost->enc->supported_framerates[idx];
3274  }
3275  // reduce frame rate for mpeg4 to be within the spec limits
3276  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3277  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3278  ost->frame_rate.num, ost->frame_rate.den, 65535);
3279  }
3280  }
3281 
3282  switch (enc_ctx->codec_type) {
3283  case AVMEDIA_TYPE_AUDIO:
3285  if (dec_ctx)
3286  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3287  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3291  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3292  break;
3293  case AVMEDIA_TYPE_VIDEO:
3294  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3295  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3297  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3299  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3300  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3301  }
3302  for (j = 0; j < ost->forced_kf_count; j++)
3303  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3305  enc_ctx->time_base);
3306 
3307  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3308  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3309  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3310  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3311  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3313  if (!strncmp(ost->enc->name, "libx264", 7) &&
3314  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3317  "No pixel format specified, %s for H.264 encoding chosen.\n"
3318  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3320  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3321  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3324  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3325  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3327  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3328  if (dec_ctx)
3329  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3330  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3331 
3332  enc_ctx->framerate = ost->frame_rate;
3333 
3334  ost->st->avg_frame_rate = ost->frame_rate;
3335 
3336  if (!dec_ctx ||
3337  enc_ctx->width != dec_ctx->width ||
3338  enc_ctx->height != dec_ctx->height ||
3339  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3341  }
3342 
3343  if (ost->forced_keyframes) {
3344  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3347  if (ret < 0) {
3349  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3350  return ret;
3351  }
3356 
3357  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3358  // parse it only for static kf timings
3359  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3361  }
3362  }
3363  break;
3364  case AVMEDIA_TYPE_SUBTITLE:
3365  enc_ctx->time_base = AV_TIME_BASE_Q;
3366  if (!enc_ctx->width) {
3367  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3368  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3369  }
3370  break;
3371  case AVMEDIA_TYPE_DATA:
3372  break;
3373  default:
3374  abort();
3375  break;
3376  }
3377 
3378  ost->mux_timebase = enc_ctx->time_base;
3379 
3380  return 0;
3381 }
3382 
3383 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3384 {
3385  int ret = 0;
3386 
3387  if (ost->encoding_needed) {
3388  AVCodec *codec = ost->enc;
3389  AVCodecContext *dec = NULL;
3390  InputStream *ist;
3391 
3392  ret = init_output_stream_encode(ost);
3393  if (ret < 0)
3394  return ret;
3395 
3396  if ((ist = get_input_stream(ost)))
3397  dec = ist->dec_ctx;
3398  if (dec && dec->subtitle_header) {
3399  /* ASS code assumes this buffer is null terminated so add extra byte. */
3401  if (!ost->enc_ctx->subtitle_header)
3402  return AVERROR(ENOMEM);
3403  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3405  }
3406  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3407  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3408  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3409  !codec->defaults &&
3410  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3411  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3412  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3413 
3414  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3416  if (!ost->enc_ctx->hw_frames_ctx)
3417  return AVERROR(ENOMEM);
3418  }
3419 
3420  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3421  if (ret == AVERROR_EXPERIMENTAL)
3422  abort_codec_experimental(codec, 1);
3423  snprintf(error, error_len,
3424  "Error while opening encoder for output stream #%d:%d - "
3425  "maybe incorrect parameters such as bit_rate, rate, width or height",
3426  ost->file_index, ost->index);
3427  return ret;
3428  }
3429  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3430  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3432  ost->enc_ctx->frame_size);
3434  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3435  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3436  " It takes bits/s as argument, not kbits/s\n");
3437 
3439  if (ret < 0) {
3441  "Error initializing the output stream codec context.\n");
3442  exit_program(1);
3443  }
3444  /*
3445  * FIXME: ost->st->codec should't be needed here anymore.
3446  */
3447  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3448  if (ret < 0)
3449  return ret;
3450 
3451  if (ost->enc_ctx->nb_coded_side_data) {
3452  int i;
3453 
3455  sizeof(*ost->st->side_data));
3456  if (!ost->st->side_data)
3457  return AVERROR(ENOMEM);
3458 
3459  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3460  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3461  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3462 
3463  sd_dst->data = av_malloc(sd_src->size);
3464  if (!sd_dst->data)
3465  return AVERROR(ENOMEM);
3466  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3467  sd_dst->size = sd_src->size;
3468  sd_dst->type = sd_src->type;
3469  ost->st->nb_side_data++;
3470  }
3471  }
3472 
3473  // copy timebase while removing common factors
3474  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3475  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3476 
3477  // copy estimated duration as a hint to the muxer
3478  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3479  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3480 
3481  ost->st->codec->codec= ost->enc_ctx->codec;
3482  } else if (ost->stream_copy) {
3483  ret = init_output_stream_streamcopy(ost);
3484  if (ret < 0)
3485  return ret;
3486 
3487  /*
3488  * FIXME: will the codec context used by the parser during streamcopy
3489  * This should go away with the new parser API.
3490  */
3491  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3492  if (ret < 0)
3493  return ret;
3494  }
3495 
3496  // parse user provided disposition, and update stream values
3497  if (ost->disposition) {
3498  static const AVOption opts[] = {
3499  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3500  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3501  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3502  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3503  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3504  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3505  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3506  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3507  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3508  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3509  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3510  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3511  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3512  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3513  { NULL },
3514  };
3515  static const AVClass class = {
3516  .class_name = "",
3517  .item_name = av_default_item_name,
3518  .option = opts,
3519  .version = LIBAVUTIL_VERSION_INT,
3520  };
3521  const AVClass *pclass = &class;
3522 
3523  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3524  if (ret < 0)
3525  return ret;
3526  }
3527 
3528  /* initialize bitstream filters for the output stream
3529  * needs to be done here, because the codec id for streamcopy is not
3530  * known until now */
3531  ret = init_output_bsfs(ost);
3532  if (ret < 0)
3533  return ret;
3534 
3535  ost->initialized = 1;
3536 
3537  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3538  if (ret < 0)
3539  return ret;
3540 
3541  return ret;
3542 }
3543 
3544 static void report_new_stream(int input_index, AVPacket *pkt)
3545 {
3546  InputFile *file = input_files[input_index];
3547  AVStream *st = file->ctx->streams[pkt->stream_index];
3548 
3549  if (pkt->stream_index < file->nb_streams_warn)
3550  return;
3551  av_log(file->ctx, AV_LOG_WARNING,
3552  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3554  input_index, pkt->stream_index,
3555  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3556  file->nb_streams_warn = pkt->stream_index + 1;
3557 }
3558 
3559 static int transcode_init(void)
3560 {
3561  int ret = 0, i, j, k;
3562  AVFormatContext *oc;
3563  OutputStream *ost;
3564  InputStream *ist;
3565  char error[1024] = {0};
3566 
3567  for (i = 0; i < nb_filtergraphs; i++) {
3568  FilterGraph *fg = filtergraphs[i];
3569  for (j = 0; j < fg->nb_outputs; j++) {
3570  OutputFilter *ofilter = fg->outputs[j];
3571  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3572  continue;
3573  if (fg->nb_inputs != 1)
3574  continue;
3575  for (k = nb_input_streams-1; k >= 0 ; k--)
3576  if (fg->inputs[0]->ist == input_streams[k])
3577  break;
3578  ofilter->ost->source_index = k;
3579  }
3580  }
3581 
3582  /* init framerate emulation */
3583  for (i = 0; i < nb_input_files; i++) {
3584  InputFile *ifile = input_files[i];
3585  if (ifile->rate_emu)
3586  for (j = 0; j < ifile->nb_streams; j++)
3587  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3588  }
3589 
3590  /* init input streams */
3591  for (i = 0; i < nb_input_streams; i++)
3592  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3593  for (i = 0; i < nb_output_streams; i++) {
3594  ost = output_streams[i];
3595  avcodec_close(ost->enc_ctx);
3596  }
3597  goto dump_format;
3598  }
3599 
3600  /* open each encoder */
3601  for (i = 0; i < nb_output_streams; i++) {
3602  // skip streams fed from filtergraphs until we have a frame for them
3603  if (output_streams[i]->filter)
3604  continue;
3605 
3606  ret = init_output_stream(output_streams[i], error, sizeof(error));
3607  if (ret < 0)
3608  goto dump_format;
3609  }
3610 
3611  /* discard unused programs */
3612  for (i = 0; i < nb_input_files; i++) {
3613  InputFile *ifile = input_files[i];
3614  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3615  AVProgram *p = ifile->ctx->programs[j];
3616  int discard = AVDISCARD_ALL;
3617 
3618  for (k = 0; k < p->nb_stream_indexes; k++)
3619  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3620  discard = AVDISCARD_DEFAULT;
3621  break;
3622  }
3623  p->discard = discard;
3624  }
3625  }
3626 
3627  /* write headers for files with no streams */
3628  for (i = 0; i < nb_output_files; i++) {
3629  oc = output_files[i]->ctx;
3630  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3631  ret = check_init_output_file(output_files[i], i);
3632  if (ret < 0)
3633  goto dump_format;
3634  }
3635  }
3636 
3637  dump_format:
3638  /* dump the stream mapping */
3639  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3640  for (i = 0; i < nb_input_streams; i++) {
3641  ist = input_streams[i];
3642 
3643  for (j = 0; j < ist->nb_filters; j++) {
3644  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3645  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3646  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3647  ist->filters[j]->name);
3648  if (nb_filtergraphs > 1)
3649  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3650  av_log(NULL, AV_LOG_INFO, "\n");
3651  }
3652  }
3653  }
3654 
3655  for (i = 0; i < nb_output_streams; i++) {
3656  ost = output_streams[i];
3657 
3658  if (ost->attachment_filename) {
3659  /* an attached file */
3660  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3661  ost->attachment_filename, ost->file_index, ost->index);
3662  continue;
3663  }
3664 
3665  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3666  /* output from a complex graph */
3667  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3668  if (nb_filtergraphs > 1)
3669  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3670 
3671  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3672  ost->index, ost->enc ? ost->enc->name : "?");
3673  continue;
3674  }
3675 
3676  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3677  input_streams[ost->source_index]->file_index,
3678  input_streams[ost->source_index]->st->index,
3679  ost->file_index,
3680  ost->index);
3681  if (ost->sync_ist != input_streams[ost->source_index])
3682  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3683  ost->sync_ist->file_index,
3684  ost->sync_ist->st->index);
3685  if (ost->stream_copy)
3686  av_log(NULL, AV_LOG_INFO, " (copy)");
3687  else {
3688  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3689  const AVCodec *out_codec = ost->enc;
3690  const char *decoder_name = "?";
3691  const char *in_codec_name = "?";
3692  const char *encoder_name = "?";
3693  const char *out_codec_name = "?";
3694  const AVCodecDescriptor *desc;
3695 
3696  if (in_codec) {
3697  decoder_name = in_codec->name;
3698  desc = avcodec_descriptor_get(in_codec->id);
3699  if (desc)
3700  in_codec_name = desc->name;
3701  if (!strcmp(decoder_name, in_codec_name))
3702  decoder_name = "native";
3703  }
3704 
3705  if (out_codec) {
3706  encoder_name = out_codec->name;
3707  desc = avcodec_descriptor_get(out_codec->id);
3708  if (desc)
3709  out_codec_name = desc->name;
3710  if (!strcmp(encoder_name, out_codec_name))
3711  encoder_name = "native";
3712  }
3713 
3714  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3715  in_codec_name, decoder_name,
3716  out_codec_name, encoder_name);
3717  }
3718  av_log(NULL, AV_LOG_INFO, "\n");
3719  }
3720 
3721  if (ret) {
3722  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3723  return ret;
3724  }
3725 
3726  transcode_init_done = 1;
3727 
3728  return 0;
3729 }
3730 
3731 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3732 static int need_output(void)
3733 {
3734  int i;
3735 
3736  for (i = 0; i < nb_output_streams; i++) {
3737  OutputStream *ost = output_streams[i];
3738  OutputFile *of = output_files[ost->file_index];
3739  AVFormatContext *os = output_files[ost->file_index]->ctx;
3740 
3741  if (ost->finished ||
3742  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3743  continue;
3744  if (ost->frame_number >= ost->max_frames) {
3745  int j;
3746  for (j = 0; j < of->ctx->nb_streams; j++)
3747  close_output_stream(output_streams[of->ost_index + j]);
3748  continue;
3749  }
3750 
3751  return 1;
3752  }
3753 
3754  return 0;
3755 }
3756 
3757 /**
3758  * Select the output stream to process.
3759  *
3760  * @return selected output stream, or NULL if none available
3761  */
3763 {
3764  int i;
3765  int64_t opts_min = INT64_MAX;
3766  OutputStream *ost_min = NULL;
3767 
3768  for (i = 0; i < nb_output_streams; i++) {
3769  OutputStream *ost = output_streams[i];
3770  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3771  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3772  AV_TIME_BASE_Q);
3773  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3774  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3775 
3776  if (!ost->initialized && !ost->inputs_done)
3777  return ost;
3778 
3779  if (!ost->finished && opts < opts_min) {
3780  opts_min = opts;
3781  ost_min = ost->unavailable ? NULL : ost;
3782  }
3783  }
3784  return ost_min;
3785 }
3786 
/* Enable (on != 0) or disable terminal echo on stdin; no-op on platforms
 * without termios, and silently ignores tcgetattr failure (e.g. stdin is
 * not a tty). */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
3798 
3800 {
3801  int i, ret, key;
3802  static int64_t last_time;
3803  if (received_nb_signals)
3804  return AVERROR_EXIT;
3805  /* read_key() returns 0 on EOF */
3806  if(cur_time - last_time >= 100000 && !run_as_daemon){
3807  key = read_key();
3808  last_time = cur_time;
3809  }else
3810  key = -1;
3811  if (key == 'q')
3812  return AVERROR_EXIT;
3813  if (key == '+') av_log_set_level(av_log_get_level()+10);
3814  if (key == '-') av_log_set_level(av_log_get_level()-10);
3815  if (key == 's') qp_hist ^= 1;
3816  if (key == 'h'){
3817  if (do_hex_dump){
3818  do_hex_dump = do_pkt_dump = 0;
3819  } else if(do_pkt_dump){
3820  do_hex_dump = 1;
3821  } else
3822  do_pkt_dump = 1;
3824  }
3825  if (key == 'c' || key == 'C'){
3826  char buf[4096], target[64], command[256], arg[256] = {0};
3827  double time;
3828  int k, n = 0;
3829  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3830  i = 0;
3831  set_tty_echo(1);
3832  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3833  if (k > 0)
3834  buf[i++] = k;
3835  buf[i] = 0;
3836  set_tty_echo(0);
3837  fprintf(stderr, "\n");
3838  if (k > 0 &&
3839  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3840  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3841  target, time, command, arg);
3842  for (i = 0; i < nb_filtergraphs; i++) {
3843  FilterGraph *fg = filtergraphs[i];
3844  if (fg->graph) {
3845  if (time < 0) {
3846  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3847  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3848  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3849  } else if (key == 'c') {
3850  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3851  ret = AVERROR_PATCHWELCOME;
3852  } else {
3853  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3854  if (ret < 0)
3855  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3856  }
3857  }
3858  }
3859  } else {
3861  "Parse error, at least 3 arguments were expected, "
3862  "only %d given in string '%s'\n", n, buf);
3863  }
3864  }
3865  if (key == 'd' || key == 'D'){
3866  int debug=0;
3867  if(key == 'D') {
3868  debug = input_streams[0]->st->codec->debug<<1;
3869  if(!debug) debug = 1;
3870  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3871  debug += debug;
3872  }else{
3873  char buf[32];
3874  int k = 0;
3875  i = 0;
3876  set_tty_echo(1);
3877  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3878  if (k > 0)
3879  buf[i++] = k;
3880  buf[i] = 0;
3881  set_tty_echo(0);
3882  fprintf(stderr, "\n");
3883  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3884  fprintf(stderr,"error parsing debug value\n");
3885  }
3886  for(i=0;i<nb_input_streams;i++) {
3887  input_streams[i]->st->codec->debug = debug;
3888  }
3889  for(i=0;i<nb_output_streams;i++) {
3890  OutputStream *ost = output_streams[i];
3891  ost->enc_ctx->debug = debug;
3892  }
3893  if(debug) av_log_set_level(AV_LOG_DEBUG);
3894  fprintf(stderr,"debug=%d\n", debug);
3895  }
3896  if (key == '?'){
3897  fprintf(stderr, "key function\n"
3898  "? show this help\n"
3899  "+ increase verbosity\n"
3900  "- decrease verbosity\n"
3901  "c Send command to first matching filter supporting it\n"
3902  "C Send/Queue command to all matching filters\n"
3903  "D cycle through available debug modes\n"
3904  "h dump packets/hex press to cycle through the 3 states\n"
3905  "q quit\n"
3906  "s Show QP histogram\n"
3907  );
3908  }
3909  return 0;
3910 }
3911 
3912 #if HAVE_PTHREADS
3913 static void *input_thread(void *arg)
3914 {
3915  InputFile *f = arg;
3916  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3917  int ret = 0;
3918 
3919  while (1) {
3920  AVPacket pkt;
3921  ret = av_read_frame(f->ctx, &pkt);
3922 
3923  if (ret == AVERROR(EAGAIN)) {
3924  av_usleep(10000);
3925  continue;
3926  }
3927  if (ret < 0) {
3928  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3929  break;
3930  }
3931  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3932  if (flags && ret == AVERROR(EAGAIN)) {
3933  flags = 0;
3934  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3936  "Thread message queue blocking; consider raising the "
3937  "thread_queue_size option (current value: %d)\n",
3938  f->thread_queue_size);
3939  }
3940  if (ret < 0) {
3941  if (ret != AVERROR_EOF)
3942  av_log(f->ctx, AV_LOG_ERROR,
3943  "Unable to send packet to main thread: %s\n",
3944  av_err2str(ret));
3945  av_packet_unref(&pkt);
3946  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3947  break;
3948  }
3949  }
3950 
3951  return NULL;
3952 }
3953 
3954 static void free_input_threads(void)
3955 {
3956  int i;
3957 
3958  for (i = 0; i < nb_input_files; i++) {
3959  InputFile *f = input_files[i];
3960  AVPacket pkt;
3961 
3962  if (!f || !f->in_thread_queue)
3963  continue;
3965  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3966  av_packet_unref(&pkt);
3967 
3968  pthread_join(f->thread, NULL);
3969  f->joined = 1;
3970  av_thread_message_queue_free(&f->in_thread_queue);
3971  }
3972 }
3973 
3974 static int init_input_threads(void)
3975 {
3976  int i, ret;
3977 
3978  if (nb_input_files == 1)
3979  return 0;
3980 
3981  for (i = 0; i < nb_input_files; i++) {
3982  InputFile *f = input_files[i];
3983 
3984  if (f->ctx->pb ? !f->ctx->pb->seekable :
3985  strcmp(f->ctx->iformat->name, "lavfi"))
3986  f->non_blocking = 1;
3987  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3988  f->thread_queue_size, sizeof(AVPacket));
3989  if (ret < 0)
3990  return ret;
3991 
3992  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3993  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3994  av_thread_message_queue_free(&f->in_thread_queue);
3995  return AVERROR(ret);
3996  }
3997  }
3998  return 0;
3999 }
4000 
4001 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4002 {
4003  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4004  f->non_blocking ?
4006 }
4007 #endif
4008 
4010 {
4011  if (f->rate_emu) {
4012  int i;
4013  for (i = 0; i < f->nb_streams; i++) {
4014  InputStream *ist = input_streams[f->ist_index + i];
4015  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4016  int64_t now = av_gettime_relative() - ist->start;
4017  if (pts > now)
4018  return AVERROR(EAGAIN);
4019  }
4020  }
4021 
4022 #if HAVE_PTHREADS
4023  if (nb_input_files > 1)
4024  return get_input_packet_mt(f, pkt);
4025 #endif
4026  return av_read_frame(f->ctx, pkt);
4027 }
4028 
4029 static int got_eagain(void)
4030 {
4031  int i;
4032  for (i = 0; i < nb_output_streams; i++)
4033  if (output_streams[i]->unavailable)
4034  return 1;
4035  return 0;
4036 }
4037 
4038 static void reset_eagain(void)
4039 {
4040  int i;
4041  for (i = 0; i < nb_input_files; i++)
4042  input_files[i]->eagain = 0;
4043  for (i = 0; i < nb_output_streams; i++)
4044  output_streams[i]->unavailable = 0;
4045 }
4046 
4047 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4048 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4049  AVRational time_base)
4050 {
4051  int ret;
4052 
4053  if (!*duration) {
4054  *duration = tmp;
4055  return tmp_time_base;
4056  }
4057 
4058  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4059  if (ret < 0) {
4060  *duration = tmp;
4061  return tmp_time_base;
4062  }
4063 
4064  return time_base;
4065 }
4066 
4068 {
4069  InputStream *ist;
4070  AVCodecContext *avctx;
4071  int i, ret, has_audio = 0;
4072  int64_t duration = 0;
4073 
4074  ret = av_seek_frame(is, -1, is->start_time, 0);
4075  if (ret < 0)
4076  return ret;
4077 
4078  for (i = 0; i < ifile->nb_streams; i++) {
4079  ist = input_streams[ifile->ist_index + i];
4080  avctx = ist->dec_ctx;
4081 
4082  // flush decoders
4083  if (ist->decoding_needed) {
4084  process_input_packet(ist, NULL, 1);
4085  avcodec_flush_buffers(avctx);
4086  }
4087 
4088  /* duration is the length of the last frame in a stream
4089  * when audio stream is present we don't care about
4090  * last video frame length because it's not defined exactly */
4091  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4092  has_audio = 1;
4093  }
4094 
4095  for (i = 0; i < ifile->nb_streams; i++) {
4096  ist = input_streams[ifile->ist_index + i];
4097  avctx = ist->dec_ctx;
4098 
4099  if (has_audio) {
4100  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4101  AVRational sample_rate = {1, avctx->sample_rate};
4102 
4103  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4104  } else
4105  continue;
4106  } else {
4107  if (ist->framerate.num) {
4108  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4109  } else if (ist->st->avg_frame_rate.num) {
4110  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4111  } else duration = 1;
4112  }
4113  if (!ifile->duration)
4114  ifile->time_base = ist->st->time_base;
4115  /* the total duration of the stream, max_pts - min_pts is
4116  * the duration of the stream without the last frame */
4117  duration += ist->max_pts - ist->min_pts;
4118  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4119  ifile->time_base);
4120  }
4121 
4122  if (ifile->loop > 0)
4123  ifile->loop--;
4124 
4125  return ret;
4126 }
4127 
/*
 * Read one packet from the input file with the given index, apply timestamp
 * corrections (wrap-around, discontinuities, -stream_loop offsets, ts_scale),
 * and hand it to process_input_packet().
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *                      this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;
    int64_t duration;
    int64_t pkt_dts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        /* Nothing available right now; record it so the caller can detect
         * the "every input starved" case. */
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0 && ifile->loop) {
        /* EOF (or error) while -stream_loop iterations remain: seek back to
         * the start of the file and retry the read once. */
        if ((ret = seek_to_start(ifile, is)) < 0)
            return ret;
        ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                /* NULL packet drains the decoder for this stream. */
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        /* Report EAGAIN rather than EOF: other input files may still have
         * packets to deliver. */
        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): the av_pkt_dump_log2() call line appears to be
         * missing from this copy of the file -- restore from upstream. */
        is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* Per-stream statistics, also used below for first-packet detection. */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
        exit_program(1);
    }

    if (debug_ts) {
        /* NOTE(review): the first av_log() argument lines appear to be
         * missing from this copy of the file. */
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* Undo a timestamp wrap-around when the packet lies beyond the
         * halfway point of the wrap range; leave the correction pending
         * (wrap_correction_done = 0) in that case. */
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            /* do not overwrite side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;
            /* display matrix is consumed by the autorotate filter instead */
            if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* Apply the input file's global timestamp offset. */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    /* Apply per-stream -itsscale. */
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* Inter-stream discontinuity handling: compare this packet's dts against
     * the last timestamp seen for the whole file.
     * NOTE(review): the line computing pkt_dts (an av_rescale_q_rnd() of
     * pkt.dts into AV_TIME_BASE_Q) and part of the codec-type condition
     * appear to be missing from this copy of the file. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
            /* Fold the jump into the file-wide ts_offset and pull this
             * packet's timestamps back accordingly. */
            ifile->ts_offset -= delta;
            /* NOTE(review): the av_log() call line appears to be missing
             * from this copy of the file. */
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* Offset timestamps by the accumulated duration of completed
     * -stream_loop iterations, and track the stream's pts extremes. */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* Per-stream discontinuity handling against the decoder's predicted dts.
     * NOTE(review): as above, the pkt_dts recomputation line and part of the
     * codec-type condition appear to be missing from this copy of the file. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                /* NOTE(review): the av_log() call line appears to be missing
                 * from this copy of the file. */
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* Non-discontinuous container: drop wildly implausible
             * timestamps instead of shifting the whole file. */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* Remember the last timestamp for the inter-stream check above. */
    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        /* NOTE(review): the first av_log() argument line appears to be
         * missing from this copy of the file. */
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* let the subtitle-to-video overlay machinery know time is advancing */
    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4377 
4378 /**
4379  * Perform a step of transcoding for the specified filter graph.
4380  *
4381  * @param[in] graph filter graph to consider
4382  * @param[out] best_ist input stream where a frame would allow to continue
4383  * @return 0 for success, <0 for error
4384  */
4385 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4386 {
4387  int i, ret;
4388  int nb_requests, nb_requests_max = 0;
4389  InputFilter *ifilter;
4390  InputStream *ist;
4391 
4392  *best_ist = NULL;
4393  ret = avfilter_graph_request_oldest(graph->graph);
4394  if (ret >= 0)
4395  return reap_filters(0);
4396 
4397  if (ret == AVERROR_EOF) {
4398  ret = reap_filters(1);
4399  for (i = 0; i < graph->nb_outputs; i++)
4400  close_output_stream(graph->outputs[i]->ost);
4401  return ret;
4402  }
4403  if (ret != AVERROR(EAGAIN))
4404  return ret;
4405 
4406  for (i = 0; i < graph->nb_inputs; i++) {
4407  ifilter = graph->inputs[i];
4408  ist = ifilter->ist;
4409  if (input_files[ist->file_index]->eagain ||
4410  input_files[ist->file_index]->eof_reached)
4411  continue;
4412  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4413  if (nb_requests > nb_requests_max) {
4414  nb_requests_max = nb_requests;
4415  *best_ist = ist;
4416  }
4417  }
4418 
4419  if (!*best_ist)
4420  for (i = 0; i < graph->nb_outputs; i++)
4421  graph->outputs[i]->ost->unavailable = 1;
4422 
4423  return 0;
4424 }
4425 
/**
 * Run a single step of transcoding.
 *
 * Chooses the output stream that is furthest behind, locates the input
 * stream that can feed it, reads/processes one packet from that input and
 * reaps any filtered frames that became available.
 *
 * @return 0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            /* Every input is temporarily starved: back off briefly. */
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    if (ost->filter && !ost->filter->graph->graph) {
        /* NOTE(review): a condition line (opening a nested if) appears to be
         * missing from this copy of the file here -- the brace nesting below
         * is unbalanced; restore from upstream. */
        ret = configure_filtergraph(ost->filter->graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /* Graph is configured: ask it which input needs a frame. */
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Graph not configured yet: feed any input that has not produced
         * output and whose file has not reached EOF. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* Streamcopy / direct mapping: use the bound source stream. */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4493 
/*
 * The following code is the main loop of the file converter: it repeatedly
 * runs transcode_step() until all outputs are finished or the user aborts,
 * then flushes decoders/encoders, writes trailers and releases per-stream
 * resources.
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_PTHREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            /* NOTE(review): the av_log() call line appears to be missing
             * from this copy of the file. */
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->filename);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): one or two cleanup lines appear to be missing from this
     * copy of the file here -- compare against upstream. */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_PTHREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        /* NOTE(review): the av_log() call line appears to be
                         * missing from this copy of the file. */
                            "Error closing logfile, loss of information possible: %s\n",
                            av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4639 
4640 
4641 static int64_t getutime(void)
4642 {
4643 #if HAVE_GETRUSAGE
4644  struct rusage rusage;
4645 
4646  getrusage(RUSAGE_SELF, &rusage);
4647  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4648 #elif HAVE_GETPROCESSTIMES
4649  HANDLE proc;
4650  FILETIME c, e, k, u;
4651  proc = GetCurrentProcess();
4652  GetProcessTimes(proc, &c, &e, &k, &u);
4653  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4654 #else
4655  return av_gettime_relative();
4656 #endif
4657 }
4658 
4659 static int64_t getmaxrss(void)
4660 {
4661 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4662  struct rusage rusage;
4663  getrusage(RUSAGE_SELF, &rusage);
4664  return (int64_t)rusage.ru_maxrss * 1024;
4665 #elif HAVE_GETPROCESSMEMORYINFO
4666  HANDLE proc;
4667  PROCESS_MEMORY_COUNTERS memcounters;
4668  proc = GetCurrentProcess();
4669  memcounters.cb = sizeof(memcounters);