FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
/* Names of the variables usable in -force_key_frames expressions,
   NULL-terminated. NOTE(review): the order presumably mirrors a parallel
   enum of expression constants declared elsewhere — confirm before
   reordering entries. */
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
130 
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
136 
137 static int want_sdp = 1;
138 
139 static int current_time;
141 
143 
148 
153 
156 
157 #if HAVE_TERMIOS_H
158 
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163 
164 #if HAVE_PTHREADS
165 static void free_input_threads(void);
166 #endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
174 {
175  int ret;
176  AVFrame *frame = ist->sub2video.frame;
177 
178  av_frame_unref(frame);
179  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
182  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183  return ret;
184  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 {
221  AVFrame *frame = ist->sub2video.frame;
222  int i;
223 
224  av_assert1(frame->data[0]);
225  ist->sub2video.last_pts = frame->pts = pts;
226  for (i = 0; i < ist->nb_filters; i++)
230 }
231 
233 {
234  AVFrame *frame = ist->sub2video.frame;
235  int8_t *dst;
236  int dst_linesize;
237  int num_rects, i;
238  int64_t pts, end_pts;
239 
240  if (!frame)
241  return;
242  if (sub) {
243  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244  AV_TIME_BASE_Q, ist->st->time_base);
245  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  num_rects = sub->num_rects;
248  } else {
249  pts = ist->sub2video.end_pts;
250  end_pts = INT64_MAX;
251  num_rects = 0;
252  }
253  if (sub2video_get_blank_frame(ist) < 0) {
255  "Impossible to get a blank canvas.\n");
256  return;
257  }
258  dst = frame->data [0];
259  dst_linesize = frame->linesize[0];
260  for (i = 0; i < num_rects; i++)
261  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262  sub2video_push_ref(ist, pts);
263  ist->sub2video.end_pts = end_pts;
264 }
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
295 static void sub2video_flush(InputStream *ist)
296 {
297  int i;
298 
299  if (ist->sub2video.end_pts < INT64_MAX)
300  sub2video_update(ist, NULL);
301  for (i = 0; i < ist->nb_filters; i++)
303 }
304 
305 /* end of sub2video hack */
306 
/* Async-signal-safe half of terminal cleanup: restore the tty attributes
   saved in oldtty if term_init() modified them (restore_tty set). Only
   tcsetattr() is called here, which POSIX lists as async-signal-safe. */
307 static void term_exit_sigsafe(void)
308 {
309 #if HAVE_TERMIOS_H
310  if(restore_tty)
311  tcsetattr (0, TCSANOW, &oldtty);
312 #endif
313 }
314 
315 void term_exit(void)
316 {
317  av_log(NULL, AV_LOG_QUIET, "%s", "");
319 }
320 
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
326 
327 static void
329 {
330  received_sigterm = sig;
333  if(received_nb_signals > 3) {
334  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335  strlen("Received > 3 system signals, hard exiting\n"));
336 
337  exit(123);
338  }
339 }
340 
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: map console events onto the same
 * signal handling used by the POSIX code path (sigterm_handler).
 *
 * @return TRUE when the event was handled, FALSE for unknown events.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
372 
373 void term_init(void)
374 {
375 #if HAVE_TERMIOS_H
377  struct termios tty;
378  if (tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
/* Poll for a single keypress without blocking.
 * Returns the character read, or -1 when no input is available (on the
 * termios path, a read() failure propagates its return value instead). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set readable;
    int ret = 1;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    timeout.tv_sec  = 0;
    timeout.tv_usec = 0;
    ret = select(1, &readable, NULL, NULL, &timeout);
    if (ret > 0) {
        ret = read(0, &ch, 1);
        return ret == 1 ? ch : ret;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe      = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
458 
459 static int decode_interrupt_cb(void *ctx)
460 {
462 }
463 
465 
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
478  for (j = 0; j < fg->nb_inputs; j++) {
479  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480  AVFrame *frame;
481  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482  sizeof(frame), NULL);
483  av_frame_free(&frame);
484  }
485  av_fifo_freep(&fg->inputs[j]->frame_queue);
486  if (fg->inputs[j]->ist->sub2video.sub_queue) {
487  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488  AVSubtitle sub;
490  &sub, sizeof(sub), NULL);
491  avsubtitle_free(&sub);
492  }
494  }
496  av_freep(&fg->inputs[j]->name);
497  av_freep(&fg->inputs[j]);
498  }
499  av_freep(&fg->inputs);
500  for (j = 0; j < fg->nb_outputs; j++) {
501  av_freep(&fg->outputs[j]->name);
502  av_freep(&fg->outputs[j]->formats);
503  av_freep(&fg->outputs[j]->channel_layouts);
504  av_freep(&fg->outputs[j]->sample_rates);
505  av_freep(&fg->outputs[j]);
506  }
507  av_freep(&fg->outputs);
508  av_freep(&fg->graph_desc);
509 
510  av_freep(&filtergraphs[i]);
511  }
512  av_freep(&filtergraphs);
513 
515 
516  /* close files */
517  for (i = 0; i < nb_output_files; i++) {
518  OutputFile *of = output_files[i];
520  if (!of)
521  continue;
522  s = of->ctx;
523  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524  avio_closep(&s->pb);
526  av_dict_free(&of->opts);
527 
528  av_freep(&output_files[i]);
529  }
530  for (i = 0; i < nb_output_streams; i++) {
531  OutputStream *ost = output_streams[i];
532 
533  if (!ost)
534  continue;
535 
536  for (j = 0; j < ost->nb_bitstream_filters; j++)
537  av_bsf_free(&ost->bsf_ctx[j]);
538  av_freep(&ost->bsf_ctx);
539 
541  av_frame_free(&ost->last_frame);
542  av_dict_free(&ost->encoder_opts);
543 
544  av_parser_close(ost->parser);
546 
547  av_freep(&ost->forced_keyframes);
549  av_freep(&ost->avfilter);
550  av_freep(&ost->logfile_prefix);
551 
553  ost->audio_channels_mapped = 0;
554 
555  av_dict_free(&ost->sws_dict);
556 
559 
560  if (ost->muxing_queue) {
561  while (av_fifo_size(ost->muxing_queue)) {
562  AVPacket pkt;
563  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564  av_packet_unref(&pkt);
565  }
567  }
568 
569  av_freep(&output_streams[i]);
570  }
571 #if HAVE_PTHREADS
572  free_input_threads();
573 #endif
574  for (i = 0; i < nb_input_files; i++) {
575  avformat_close_input(&input_files[i]->ctx);
576  av_freep(&input_files[i]);
577  }
578  for (i = 0; i < nb_input_streams; i++) {
579  InputStream *ist = input_streams[i];
580 
583  av_dict_free(&ist->decoder_opts);
586  av_freep(&ist->filters);
587  av_freep(&ist->hwaccel_device);
588  av_freep(&ist->dts_buffer);
589 
591 
592  av_freep(&input_streams[i]);
593  }
594 
595  if (vstats_file) {
596  if (fclose(vstats_file))
598  "Error closing vstats file, loss of information possible: %s\n",
599  av_err2str(AVERROR(errno)));
600  }
602 
603  av_freep(&input_streams);
604  av_freep(&input_files);
605  av_freep(&output_streams);
606  av_freep(&output_files);
607 
608  uninit_opts();
609 
611 
612  if (received_sigterm) {
613  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614  (int) received_sigterm);
615  } else if (ret && atomic_load(&transcode_init_done)) {
616  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
617  }
618  term_exit();
619  ffmpeg_exited = 1;
620 }
621 
623 {
624  AVDictionaryEntry *t = NULL;
625 
626  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628  }
629 }
630 
632 {
634  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
636  exit_program(1);
637  }
638 }
639 
/* Terminate the program after a request for an experimental codec.
   Both parameters are unused in this body — it only calls
   exit_program(1); any diagnostic is presumably emitted by the caller. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
641 {
642  exit_program(1);
643 }
644 
645 static void update_benchmark(const char *fmt, ...)
646 {
647  if (do_benchmark_all) {
648  int64_t t = getutime();
649  va_list va;
650  char buf[1024];
651 
652  if (fmt) {
653  va_start(va, fmt);
654  vsnprintf(buf, sizeof(buf), fmt, va);
655  va_end(va);
656  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
657  }
658  current_time = t;
659  }
660 }
661 
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 {
664  int i;
665  for (i = 0; i < nb_output_streams; i++) {
666  OutputStream *ost2 = output_streams[i];
667  ost2->finished |= ost == ost2 ? this_stream : others;
668  }
669 }
670 
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
672 {
673  AVFormatContext *s = of->ctx;
674  AVStream *st = ost->st;
675  int ret;
676 
677  /*
678  * Audio encoders may split the packets -- #frames in != #packets out.
679  * But there is no reordering, so we can limit the number of output packets
680  * by simply dropping them here.
681  * Counting encoded video frames needs to be done separately because of
682  * reordering, see do_video_out().
683  * Do not count the packet when unqueued because it has been counted when queued.
684  */
685  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686  if (ost->frame_number >= ost->max_frames) {
687  av_packet_unref(pkt);
688  return;
689  }
690  ost->frame_number++;
691  }
692 
693  if (!of->header_written) {
694  AVPacket tmp_pkt = {0};
695  /* the muxer is not initialized yet, buffer the packet */
696  if (!av_fifo_space(ost->muxing_queue)) {
697  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698  ost->max_muxing_queue_size);
699  if (new_size <= av_fifo_size(ost->muxing_queue)) {
701  "Too many packets buffered for output stream %d:%d.\n",
702  ost->file_index, ost->st->index);
703  exit_program(1);
704  }
705  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
706  if (ret < 0)
707  exit_program(1);
708  }
709  ret = av_packet_ref(&tmp_pkt, pkt);
710  if (ret < 0)
711  exit_program(1);
712  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713  av_packet_unref(pkt);
714  return;
715  }
716 
719  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
720 
721  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
722  int i;
724  NULL);
725  ost->quality = sd ? AV_RL32(sd) : -1;
726  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
727 
728  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
729  if (sd && i < sd[5])
730  ost->error[i] = AV_RL64(sd + 8 + 8*i);
731  else
732  ost->error[i] = -1;
733  }
734 
735  if (ost->frame_rate.num && ost->is_cfr) {
736  if (pkt->duration > 0)
737  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
739  ost->mux_timebase);
740  }
741  }
742 
743  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
744 
745  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746  if (pkt->dts != AV_NOPTS_VALUE &&
747  pkt->pts != AV_NOPTS_VALUE &&
748  pkt->dts > pkt->pts) {
749  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
750  pkt->dts, pkt->pts,
751  ost->file_index, ost->st->index);
752  pkt->pts =
753  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
756  }
758  pkt->dts != AV_NOPTS_VALUE &&
759  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760  ost->last_mux_dts != AV_NOPTS_VALUE) {
761  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762  if (pkt->dts < max) {
763  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764  av_log(s, loglevel, "Non-monotonous DTS in output stream "
765  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
767  if (exit_on_error) {
768  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
769  exit_program(1);
770  }
771  av_log(s, loglevel, "changing to %"PRId64". This may result "
772  "in incorrect timestamps in the output file.\n",
773  max);
774  if (pkt->pts >= pkt->dts)
775  pkt->pts = FFMAX(pkt->pts, max);
776  pkt->dts = max;
777  }
778  }
779  }
780  ost->last_mux_dts = pkt->dts;
781 
782  ost->data_size += pkt->size;
783  ost->packets_written++;
784 
785  pkt->stream_index = ost->index;
786 
787  if (debug_ts) {
788  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
791  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793  pkt->size
794  );
795  }
796 
797  ret = av_interleaved_write_frame(s, pkt);
798  if (ret < 0) {
799  print_error("av_interleaved_write_frame()", ret);
800  main_return_code = 1;
802  }
803  av_packet_unref(pkt);
804 }
805 
807 {
808  OutputFile *of = output_files[ost->file_index];
809 
810  ost->finished |= ENCODER_FINISHED;
811  if (of->shortest) {
812  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813  of->recording_time = FFMIN(of->recording_time, end);
814  }
815 }
816 
817 /*
818  * Send a single packet to the output, applying any bitstream filters
819  * associated with the output stream. This may result in any number
820  * of packets actually being written, depending on what bitstream
821  * filters are applied. The supplied packet is consumed and will be
822  * blank (as if newly-allocated) when this function returns.
823  *
824  * If eof is set, instead indicate EOF to all bitstream filters and
825  * therefore flush any delayed packets to the output. A blank packet
826  * must be supplied in this case.
827  */
829  OutputStream *ost, int eof)
830 {
831  int ret = 0;
832 
833  /* apply the output bitstream filters, if any */
834  if (ost->nb_bitstream_filters) {
835  int idx;
836 
837  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
838  if (ret < 0)
839  goto finish;
840 
841  eof = 0;
842  idx = 1;
843  while (idx) {
844  /* get a packet from the previous filter up the chain */
845  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846  if (ret == AVERROR(EAGAIN)) {
847  ret = 0;
848  idx--;
849  continue;
850  } else if (ret == AVERROR_EOF) {
851  eof = 1;
852  } else if (ret < 0)
853  goto finish;
854 
855  /* send it to the next filter down the chain or to the muxer */
856  if (idx < ost->nb_bitstream_filters) {
857  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
858  if (ret < 0)
859  goto finish;
860  idx++;
861  eof = 0;
862  } else if (eof)
863  goto finish;
864  else
865  write_packet(of, pkt, ost, 0);
866  }
867  } else if (!eof)
868  write_packet(of, pkt, ost, 0);
869 
870 finish:
871  if (ret < 0 && ret != AVERROR_EOF) {
872  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
874  if(exit_on_error)
875  exit_program(1);
876  }
877 }
878 
880 {
881  OutputFile *of = output_files[ost->file_index];
882 
883  if (of->recording_time != INT64_MAX &&
885  AV_TIME_BASE_Q) >= 0) {
886  close_output_stream(ost);
887  return 0;
888  }
889  return 1;
890 }
891 
/* Encode one audio frame on ost and send every resulting packet to the
   muxer via output_packet(). Uses the send/receive encode API: one
   avcodec_send_frame() followed by draining avcodec_receive_packet()
   until EAGAIN. Fatal on any encoder error (exits the program).
   NOTE(review): original line 913 is missing from this extraction
   (presumably a blank line) — verify against upstream. */
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
893  AVFrame *frame)
894 {
895  AVCodecContext *enc = ost->enc_ctx;
896  AVPacket pkt;
897  int ret;
898 
899  av_init_packet(&pkt);
900  pkt.data = NULL;
901  pkt.size = 0;
902 
903  if (!check_recording_time(ost))
904  return;
905 
/* without a usable pts (or with -async disabled below 0), stamp the frame
   with the running output sample count, then advance it by nb_samples */
906  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907  frame->pts = ost->sync_opts;
908  ost->sync_opts = frame->pts + frame->nb_samples;
909  ost->samples_encoded += frame->nb_samples;
910  ost->frames_encoded++;
911 
912  av_assert0(pkt.size || !pkt.data);
914  if (debug_ts) {
915  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918  enc->time_base.num, enc->time_base.den);
919  }
920 
921  ret = avcodec_send_frame(enc, frame);
922  if (ret < 0)
923  goto error;
924 
/* drain all packets the encoder has ready; EAGAIN means it needs more input */
925  while (1) {
926  ret = avcodec_receive_packet(enc, &pkt);
927  if (ret == AVERROR(EAGAIN))
928  break;
929  if (ret < 0)
930  goto error;
931 
932  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
933 
/* encoder timestamps are in enc->time_base; convert to the mux time base */
934  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
935 
936  if (debug_ts) {
937  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
941  }
942 
943  output_packet(of, &pkt, ost, 0);
944  }
945 
946  return;
947 error:
948  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
949  exit_program(1);
950 }
951 
952 static void do_subtitle_out(OutputFile *of,
953  OutputStream *ost,
954  AVSubtitle *sub)
955 {
956  int subtitle_out_max_size = 1024 * 1024;
957  int subtitle_out_size, nb, i;
958  AVCodecContext *enc;
959  AVPacket pkt;
960  int64_t pts;
961 
962  if (sub->pts == AV_NOPTS_VALUE) {
963  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
964  if (exit_on_error)
965  exit_program(1);
966  return;
967  }
968 
969  enc = ost->enc_ctx;
970 
971  if (!subtitle_out) {
972  subtitle_out = av_malloc(subtitle_out_max_size);
973  if (!subtitle_out) {
974  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
975  exit_program(1);
976  }
977  }
978 
979  /* Note: DVB subtitle need one packet to draw them and one other
980  packet to clear them */
981  /* XXX: signal it in the codec context ? */
983  nb = 2;
984  else
985  nb = 1;
986 
987  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
988  pts = sub->pts;
989  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990  pts -= output_files[ost->file_index]->start_time;
991  for (i = 0; i < nb; i++) {
992  unsigned save_num_rects = sub->num_rects;
993 
994  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995  if (!check_recording_time(ost))
996  return;
997 
998  sub->pts = pts;
999  // start_display_time is required to be 0
1000  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001  sub->end_display_time -= sub->start_display_time;
1002  sub->start_display_time = 0;
1003  if (i == 1)
1004  sub->num_rects = 0;
1005 
1006  ost->frames_encoded++;
1007 
1008  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009  subtitle_out_max_size, sub);
1010  if (i == 1)
1011  sub->num_rects = save_num_rects;
1012  if (subtitle_out_size < 0) {
1013  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1014  exit_program(1);
1015  }
1016 
1017  av_init_packet(&pkt);
1018  pkt.data = subtitle_out;
1019  pkt.size = subtitle_out_size;
1020  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023  /* XXX: the pts correction is handled here. Maybe handling
1024  it in the codec would be better */
1025  if (i == 0)
1026  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027  else
1028  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029  }
1030  pkt.dts = pkt.pts;
1031  output_packet(of, &pkt, ost, 0);
1032  }
1033 }
1034 
1035 static void do_video_out(OutputFile *of,
1036  OutputStream *ost,
1037  AVFrame *next_picture,
1038  double sync_ipts)
1039 {
1040  int ret, format_video_sync;
1041  AVPacket pkt;
1042  AVCodecContext *enc = ost->enc_ctx;
1043  AVCodecParameters *mux_par = ost->st->codecpar;
1044  AVRational frame_rate;
1045  int nb_frames, nb0_frames, i;
1046  double delta, delta0;
1047  double duration = 0;
1048  int frame_size = 0;
1049  InputStream *ist = NULL;
1051 
1052  if (ost->source_index >= 0)
1053  ist = input_streams[ost->source_index];
1054 
1055  frame_rate = av_buffersink_get_frame_rate(filter);
1056  if (frame_rate.num > 0 && frame_rate.den > 0)
1057  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1058 
1059  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1061 
1062  if (!ost->filters_script &&
1063  !ost->filters &&
1064  next_picture &&
1065  ist &&
1066  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1068  }
1069 
1070  if (!next_picture) {
1071  //end, flushing
1072  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073  ost->last_nb0_frames[1],
1074  ost->last_nb0_frames[2]);
1075  } else {
1076  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077  delta = delta0 + duration;
1078 
1079  /* by default, we output a single frame */
1080  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1081  nb_frames = 1;
1082 
1083  format_video_sync = video_sync_method;
1084  if (format_video_sync == VSYNC_AUTO) {
1085  if(!strcmp(of->ctx->oformat->name, "avi")) {
1086  format_video_sync = VSYNC_VFR;
1087  } else
1088  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1089  if ( ist
1090  && format_video_sync == VSYNC_CFR
1091  && input_files[ist->file_index]->ctx->nb_streams == 1
1092  && input_files[ist->file_index]->input_ts_offset == 0) {
1093  format_video_sync = VSYNC_VSCFR;
1094  }
1095  if (format_video_sync == VSYNC_CFR && copy_ts) {
1096  format_video_sync = VSYNC_VSCFR;
1097  }
1098  }
1099  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1100 
1101  if (delta0 < 0 &&
1102  delta > 0 &&
1103  format_video_sync != VSYNC_PASSTHROUGH &&
1104  format_video_sync != VSYNC_DROP) {
1105  if (delta0 < -0.6) {
1106  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1107  } else
1108  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109  sync_ipts = ost->sync_opts;
1110  duration += delta0;
1111  delta0 = 0;
1112  }
1113 
1114  switch (format_video_sync) {
1115  case VSYNC_VSCFR:
1116  if (ost->frame_number == 0 && delta0 >= 0.5) {
1117  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1118  delta = duration;
1119  delta0 = 0;
1120  ost->sync_opts = lrint(sync_ipts);
1121  }
1122  case VSYNC_CFR:
1123  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1125  nb_frames = 0;
1126  } else if (delta < -1.1)
1127  nb_frames = 0;
1128  else if (delta > 1.1) {
1129  nb_frames = lrintf(delta);
1130  if (delta0 > 1.1)
1131  nb0_frames = lrintf(delta0 - 0.6);
1132  }
1133  break;
1134  case VSYNC_VFR:
1135  if (delta <= -0.6)
1136  nb_frames = 0;
1137  else if (delta > 0.6)
1138  ost->sync_opts = lrint(sync_ipts);
1139  break;
1140  case VSYNC_DROP:
1141  case VSYNC_PASSTHROUGH:
1142  ost->sync_opts = lrint(sync_ipts);
1143  break;
1144  default:
1145  av_assert0(0);
1146  }
1147  }
1148 
1149  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150  nb0_frames = FFMIN(nb0_frames, nb_frames);
1151 
1152  memmove(ost->last_nb0_frames + 1,
1153  ost->last_nb0_frames,
1154  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155  ost->last_nb0_frames[0] = nb0_frames;
1156 
1157  if (nb0_frames == 0 && ost->last_dropped) {
1158  nb_frames_drop++;
1160  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161  ost->frame_number, ost->st->index, ost->last_frame->pts);
1162  }
1163  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164  if (nb_frames > dts_error_threshold * 30) {
1165  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1166  nb_frames_drop++;
1167  return;
1168  }
1169  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171  if (nb_frames_dup > dup_warning) {
1172  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1173  dup_warning *= 10;
1174  }
1175  }
1176  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1177 
1178  /* duplicates frame if needed */
1179  for (i = 0; i < nb_frames; i++) {
1180  AVFrame *in_picture;
1181  av_init_packet(&pkt);
1182  pkt.data = NULL;
1183  pkt.size = 0;
1184 
1185  if (i < nb0_frames && ost->last_frame) {
1186  in_picture = ost->last_frame;
1187  } else
1188  in_picture = next_picture;
1189 
1190  if (!in_picture)
1191  return;
1192 
1193  in_picture->pts = ost->sync_opts;
1194 
1195 #if 1
1196  if (!check_recording_time(ost))
1197 #else
1198  if (ost->frame_number >= ost->max_frames)
1199 #endif
1200  return;
1201 
1202  {
1203  int forced_keyframe = 0;
1204  double pts_time;
1205 
1207  ost->top_field_first >= 0)
1208  in_picture->top_field_first = !!ost->top_field_first;
1209 
1210  if (in_picture->interlaced_frame) {
1211  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1212  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1213  else
1214  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1215  } else
1216  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1217 
1218  in_picture->quality = enc->global_quality;
1219  in_picture->pict_type = 0;
1220 
1221  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1222  in_picture->pts * av_q2d(enc->time_base) : NAN;
1223  if (ost->forced_kf_index < ost->forced_kf_count &&
1224  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1225  ost->forced_kf_index++;
1226  forced_keyframe = 1;
1227  } else if (ost->forced_keyframes_pexpr) {
1228  double res;
1229  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1232  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1238  res);
1239  if (res) {
1240  forced_keyframe = 1;
1246  }
1247 
1249  } else if ( ost->forced_keyframes
1250  && !strncmp(ost->forced_keyframes, "source", 6)
1251  && in_picture->key_frame==1) {
1252  forced_keyframe = 1;
1253  }
1254 
1255  if (forced_keyframe) {
1256  in_picture->pict_type = AV_PICTURE_TYPE_I;
1257  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1258  }
1259 
1261  if (debug_ts) {
1262  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1263  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1264  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1265  enc->time_base.num, enc->time_base.den);
1266  }
1267 
1268  ost->frames_encoded++;
1269 
1270  ret = avcodec_send_frame(enc, in_picture);
1271  if (ret < 0)
1272  goto error;
1273 
1274  while (1) {
1275  ret = avcodec_receive_packet(enc, &pkt);
1276  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1277  if (ret == AVERROR(EAGAIN))
1278  break;
1279  if (ret < 0)
1280  goto error;
1281 
1282  if (debug_ts) {
1283  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1284  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1285  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1286  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1287  }
1288 
1289  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1290  pkt.pts = ost->sync_opts;
1291 
1292  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1293 
1294  if (debug_ts) {
1295  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1296  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1298  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1299  }
1300 
1301  frame_size = pkt.size;
1302  output_packet(of, &pkt, ost, 0);
1303 
1304  /* if two pass, output log */
1305  if (ost->logfile && enc->stats_out) {
1306  fprintf(ost->logfile, "%s", enc->stats_out);
1307  }
1308  }
1309  }
1310  ost->sync_opts++;
1311  /*
1312  * For video, number of frames in == number of packets out.
1313  * But there may be reordering, so we can't throw away frames on encoder
1314  * flush, we need to limit them here, before they go into encoder.
1315  */
1316  ost->frame_number++;
1317 
1318  if (vstats_filename && frame_size)
1319  do_video_stats(ost, frame_size);
1320  }
1321 
1322  if (!ost->last_frame)
1323  ost->last_frame = av_frame_alloc();
1324  av_frame_unref(ost->last_frame);
1325  if (next_picture && ost->last_frame)
1326  av_frame_ref(ost->last_frame, next_picture);
1327  else
1328  av_frame_free(&ost->last_frame);
1329 
1330  return;
1331 error:
1332  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1333  exit_program(1);
1334 }
1335 
1336 static double psnr(double d)
1337 {
1338  return -10.0 * log10(d);
1339 }
1340 
/* Append one line of per-frame video encoding statistics (frame number,
 * quantizer, optional PSNR, frame size, bitrates, picture type) to the
 * file named by -vstats_file / -vstats.
 * NOTE(review): this listing is missing source line 1341 — presumably the
 * function signature (do_video_stats taking the OutputStream and the encoded
 * frame size in bytes); confirm against the original ffmpeg.c. */
 1342 {
 1343  AVCodecContext *enc;
 1344  int frame_number;
 1345  double ti1, bitrate, avg_bitrate;
 1346 
 1347  /* this is executed just the first time do_video_stats is called */
 1348  if (!vstats_file) {
 1349  vstats_file = fopen(vstats_filename, "w");
 1350  if (!vstats_file) {
 1351  perror("fopen");
 1352  exit_program(1);
 1353  }
 1354  }
 1355 
 1356  enc = ost->enc_ctx;
 1357  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1358  frame_number = ost->st->nb_frames;
 1359  /* vstats_version selects between the legacy and the extended line format */
 1360  if (vstats_version <= 1) {
 1361  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
 1362  ost->quality / (float)FF_QP2LAMBDA);
 1363  } else {
 1364  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
 1365  ost->quality / (float)FF_QP2LAMBDA);
 1366  }
 1367 
 1368  /* per-frame PSNR, only when the encoder was asked to collect error data */
 1369  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
 1370  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 1371 
 1372  fprintf(vstats_file,"f_size= %6d ", frame_size);
 1373  /* compute pts value */
 1374  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
 1375  if (ti1 < 0.01)
 1376  ti1 = 0.01;
 1377 
 1378  /* instantaneous bitrate of this frame, and running average over ti1 */
 1379  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
 1380  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
 1381  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 1382  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
 1383  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
 1384  }
 1385 }
1383 
1384 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1385 
/* Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the listing is missing source lines 1386 and 1391 here —
 * presumably the signature "static void finish_output_stream(OutputStream
 * *ost)" and the statement marking ost itself finished; confirm against the
 * original ffmpeg.c. */
 1387 {
 1388  OutputFile *of = output_files[ost->file_index];
 1389  int i;
 1390 
 1392 
 1393  if (of->shortest) {
 1394  /* -shortest: ending one stream ends all streams of this output file */
 1395  for (i = 0; i < of->ctx->nb_streams; i++)
 1396  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
 1397  }
 1398 }
1398 
1399 /**
1400  * Get and encode new output from any of the filtergraphs, without causing
1401  * activity.
1402  *
1403  * @return 0 for success, <0 for severe errors
1404  */
 1405 static int reap_filters(int flush)
 1406 {
 1407  AVFrame *filtered_frame = NULL;
 1408  int i;
 1409 
 /* NOTE(review): the extraction dropped several source lines in this function
  * (1414, 1440, 1443, 1446, 1493) — among them the declaration of `filter`
  * and the second argument of av_buffersink_get_frame_flags(); this block is
  * not compilable as shown. Confirm against the original ffmpeg.c. */
 1410  /* Reap all buffers present in the buffer sinks */
 1411  for (i = 0; i < nb_output_streams; i++) {
 1412  OutputStream *ost = output_streams[i];
 1413  OutputFile *of = output_files[ost->file_index];
 1415  AVCodecContext *enc = ost->enc_ctx;
 1416  int ret = 0;
 1417 
 1418  if (!ost->filter || !ost->filter->graph->graph)
 1419  continue;
 1420  filter = ost->filter->filter;
 1421 
 1422  /* lazily initialize the output stream the first time data arrives */
 1423  if (!ost->initialized) {
 1424  char error[1024] = "";
 1425  ret = init_output_stream(ost, error, sizeof(error));
 1426  if (ret < 0) {
 1427  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
 1428  ost->file_index, ost->index, error);
 1429  exit_program(1);
 1430  }
 1431  }
 1432 
 1433  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
 1434  return AVERROR(ENOMEM);
 1435  }
 1436  filtered_frame = ost->filtered_frame;
 1437 
 1438  /* drain every frame currently buffered in this stream's sink */
 1439  while (1) {
 1440  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
 1441  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
 1442  if (ret < 0) {
 1443  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
 1444  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
 1445  } else if (flush && ret == AVERROR_EOF) {
 1446  /* on flush, give do_video_out() a NULL frame so it can emit trailing duplicates */
 1447  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
 1448  }
 1449  break;
 1450  }
 1451  if (ost->finished) {
 1452  av_frame_unref(filtered_frame);
 1453  continue;
 1454  }
 1455  if (filtered_frame->pts != AV_NOPTS_VALUE) {
 1456  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
 1457  AVRational filter_tb = av_buffersink_get_time_base(filter);
 1458  AVRational tb = enc->time_base;
 1459  /* widen the timebase denominator so float_pts keeps sub-tick precision */
 1460  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
 1461 
 1462  tb.den <<= extra_bits;
 1463  float_pts =
 1464  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
 1465  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
 1466  float_pts /= 1 << extra_bits;
 1467  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
 1468  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
 1469 
 1470  filtered_frame->pts =
 1471  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
 1472  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
 1473  }
 1474  //if (ost->source_index >= 0)
 1475  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
 1476 
 1477  switch (av_buffersink_get_type(filter)) {
 1478  case AVMEDIA_TYPE_VIDEO:
 1479  if (!ost->frame_aspect_ratio.num)
 1480  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 1481 
 1482  if (debug_ts) {
 1483  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
 1484  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
 1485  float_pts,
 1486  enc->time_base.num, enc->time_base.den);
 1487  }
 1488 
 1489  do_video_out(of, ost, filtered_frame, float_pts);
 1490  break;
 1491  case AVMEDIA_TYPE_AUDIO:
 1492  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
 1493  enc->channels != filtered_frame->channels) {
 1494  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
 1495  break;
 1496  }
 1497  do_audio_out(of, ost, filtered_frame);
 1498  break;
 1499  default:
 1500  // TODO support subtitle filters
 1501  av_assert0(0);
 1502  }
 1503 
 1504  av_frame_unref(filtered_frame);
 1505  }
 1506  }
 1507 
 1508  return 0;
 1509 }
1510 
/* Print the end-of-run summary: per-type output byte counts, muxing overhead
 * percentage, and verbose per-stream demux/mux/encode/decode statistics.
 * NOTE(review): the listing is missing source lines 1530-1531 — presumably
 * the condition that clears pass1_used (a check of the PASS1/PASS2 encoder
 * flags); confirm against the original ffmpeg.c. */
 1511 static void print_final_stats(int64_t total_size)
 1512 {
 1513  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
 1514  uint64_t subtitle_size = 0;
 1515  uint64_t data_size = 0;
 1516  float percent = -1.0;
 1517  int i, j;
 1518  int pass1_used = 1;
 1519 
 1520  /* accumulate written bytes per media type across all output streams */
 1521  for (i = 0; i < nb_output_streams; i++) {
 1522  OutputStream *ost = output_streams[i];
 1523  switch (ost->enc_ctx->codec_type) {
 1524  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
 1525  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
 1526  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
 1527  default: other_size += ost->data_size; break;
 1528  }
 1529  extra_size += ost->enc_ctx->extradata_size;
 1530  data_size += ost->data_size;
 1532  pass1_used = 0;
 1533  }
 1534 
 1535  /* muxing overhead = container bytes beyond the raw payload */
 1536  if (data_size && total_size>0 && total_size >= data_size)
 1537  percent = 100.0 * (total_size - data_size) / data_size;
 1538 
 1539  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
 1540  video_size / 1024.0,
 1541  audio_size / 1024.0,
 1542  subtitle_size / 1024.0,
 1543  other_size / 1024.0,
 1544  extra_size / 1024.0);
 1545  if (percent >= 0.0)
 1546  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
 1547  else
 1548  av_log(NULL, AV_LOG_INFO, "unknown");
 1549  av_log(NULL, AV_LOG_INFO, "\n");
 1550 
 1551  /* print verbose per-stream stats */
 1552  for (i = 0; i < nb_input_files; i++) {
 1553  InputFile *f = input_files[i];
 1554  uint64_t total_packets = 0, total_size = 0;
 1555 
 1556  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
 1557  i, f->ctx->filename);
 1558 
 1559  for (j = 0; j < f->nb_streams; j++) {
 1560  InputStream *ist = input_streams[f->ist_index + j];
 1561  enum AVMediaType type = ist->dec_ctx->codec_type;
 1562 
 1563  total_size += ist->data_size;
 1564  total_packets += ist->nb_packets;
 1565 
 1566  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
 1567  i, j, media_type_string(type));
 1568  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
 1569  ist->nb_packets, ist->data_size);
 1570 
 1571  if (ist->decoding_needed) {
 1572  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
 1573  ist->frames_decoded);
 1574  if (type == AVMEDIA_TYPE_AUDIO)
 1575  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
 1576  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1577  }
 1578 
 1579  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1580  }
 1581 
 1582  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
 1583  total_packets, total_size);
 1584  }
 1585 
 1586  for (i = 0; i < nb_output_files; i++) {
 1587  OutputFile *of = output_files[i];
 1588  uint64_t total_packets = 0, total_size = 0;
 1589 
 1590  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
 1591  i, of->ctx->filename);
 1592 
 1593  for (j = 0; j < of->ctx->nb_streams; j++) {
 1594  OutputStream *ost = output_streams[of->ost_index + j];
 1595  enum AVMediaType type = ost->enc_ctx->codec_type;
 1596 
 1597  total_size += ost->data_size;
 1598  total_packets += ost->packets_written;
 1599 
 1600  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
 1601  i, j, media_type_string(type));
 1602  if (ost->encoding_needed) {
 1603  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
 1604  ost->frames_encoded);
 1605  if (type == AVMEDIA_TYPE_AUDIO)
 1606  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
 1607  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1608  }
 1609 
 1610  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
 1611  ost->packets_written, ost->data_size);
 1612 
 1613  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1614  }
 1615 
 1616  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
 1617  total_packets, total_size);
 1618  }
 1619  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
 1620  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
 1621  if (pass1_used) {
 1622  av_log(NULL, AV_LOG_WARNING, "\n");
 1623  } else {
 1624  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
 1625  }
 1626  }
 1627 }
1627 
/* Emit the periodic status line (frame=, fps=, q=, size=, time=, bitrate=,
 * speed=) to stderr/log, and the machine-readable key=value progress report
 * to -progress (progress_avio). Rate-limited to one update per 500ms unless
 * this is the final report, in which case print_final_stats() is also run.
 * NOTE(review): the extraction dropped source lines 1737, 1778, 1780 and
 * 1811 (e.g. the AV_NOPTS_VALUE guard before the pts computation and the
 * dup/drop argument line of the snprintf); confirm against the original
 * ffmpeg.c — this block is not compilable as shown. */
 1628 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
 1629 {
 1630  char buf[1024];
 1631  AVBPrint buf_script;
 1632  OutputStream *ost;
 1633  AVFormatContext *oc;
 1634  int64_t total_size;
 1635  AVCodecContext *enc;
 1636  int frame_number, vid, i;
 1637  double bitrate;
 1638  double speed;
 1639  int64_t pts = INT64_MIN + 1;
 1640  static int64_t last_time = -1;
 1641  static int qp_histogram[52];
 1642  int hours, mins, secs, us;
 1643  int ret;
 1644  float t;
 1645 
 1646  if (!print_stats && !is_last_report && !progress_avio)
 1647  return;
 1648 
 1649  /* rate-limit intermediate reports to one per 500ms of wall clock */
 1650  if (!is_last_report) {
 1651  if (last_time == -1) {
 1652  last_time = cur_time;
 1653  return;
 1654  }
 1655  if ((cur_time - last_time) < 500000)
 1656  return;
 1657  last_time = cur_time;
 1658  }
 1659 
 1660  t = (cur_time-timer_start) / 1000000.0;
 1661 
 1662 
 1663  oc = output_files[0]->ctx;
 1664 
 1665  total_size = avio_size(oc->pb);
 1666  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
 1667  total_size = avio_tell(oc->pb);
 1668 
 1669  buf[0] = '\0';
 1670  vid = 0;
 1671  av_bprint_init(&buf_script, 0, 1);
 1672  for (i = 0; i < nb_output_streams; i++) {
 1673  float q = -1;
 1674  ost = output_streams[i];
 1675  enc = ost->enc_ctx;
 1676  if (!ost->stream_copy)
 1677  q = ost->quality / (float) FF_QP2LAMBDA;
 1678 
 1679  /* only the first video stream contributes frame=/fps=; later ones just add q= */
 1680  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1681  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
 1682  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1683  ost->file_index, ost->index, q);
 1684  }
 1685  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1686  float fps;
 1687 
 1688  frame_number = ost->frame_number;
 1689  fps = t > 1 ? frame_number / t : 0;
 1690  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
 1691  frame_number, fps < 9.95, fps, q);
 1692  av_bprintf(&buf_script, "frame=%d\n", frame_number);
 1693  av_bprintf(&buf_script, "fps=%.1f\n", fps);
 1694  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1695  ost->file_index, ost->index, q);
 1696  if (is_last_report)
 1697  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
 1698  if (qp_hist) {
 1699  int j;
 1700  int qp = lrintf(q);
 1701  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
 1702  qp_histogram[qp]++;
 1703  for (j = 0; j < 32; j++)
 1704  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
 1705  }
 1706 
 1707  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
 1708  int j;
 1709  double error, error_sum = 0;
 1710  double scale, scale_sum = 0;
 1711  double p;
 1712  char type[3] = { 'Y','U','V' };
 1713  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
 1714  for (j = 0; j < 3; j++) {
 1715  if (is_last_report) {
 1716  error = enc->error[j];
 1717  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
 1718  } else {
 1719  error = ost->error[j];
 1720  scale = enc->width * enc->height * 255.0 * 255.0;
 1721  }
 1722  /* chroma planes are subsampled: quarter the scale for U and V */
 1723  if (j)
 1724  scale /= 4;
 1725  error_sum += error;
 1726  scale_sum += scale;
 1727  p = psnr(error / scale);
 1728  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
 1729  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
 1730  ost->file_index, ost->index, type[j] | 32, p);
 1731  }
 1732  p = psnr(error_sum / scale_sum);
 1733  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
 1734  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
 1735  ost->file_index, ost->index, p);
 1736  }
 1737  vid = 1;
 1738  }
 1739  /* compute min output value */
 1740  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
 1741  ost->st->time_base, AV_TIME_BASE_Q));
 1742  if (is_last_report)
 1743  nb_frames_drop += ost->last_dropped;
 1744  }
 1745 
 1746  secs = FFABS(pts) / AV_TIME_BASE;
 1747  us = FFABS(pts) % AV_TIME_BASE;
 1748  mins = secs / 60;
 1749  secs %= 60;
 1750  hours = mins / 60;
 1751  mins %= 60;
 1752 
 1753  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
 1754  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
 1755 
 1756  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1757  "size=N/A time=");
 1758  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1759  "size=%8.0fkB time=", total_size / 1024.0);
 1760  if (pts < 0)
 1761  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
 1762  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1763  "%02d:%02d:%02d.%02d ", hours, mins, secs,
 1764  (100 * us) / AV_TIME_BASE);
 1765 
 1766  if (bitrate < 0) {
 1767  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
 1768  av_bprintf(&buf_script, "bitrate=N/A\n");
 1769  }else{
 1770  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
 1771  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
 1772  }
 1773 
 1774  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
 1775  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
 1776  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
 1777  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
 1778  hours, mins, secs, us);
 1779 
 1780  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
 1781  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
 1782  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
 1783 
 1784  if (speed < 0) {
 1785  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
 1786  av_bprintf(&buf_script, "speed=N/A\n");
 1787  } else {
 1788  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
 1789  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
 1790  }
 1791 
 1792  if (print_stats || is_last_report) {
 1793  const char end = is_last_report ? '\n' : '\r';
 1794  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
 1795  fprintf(stderr, "%s %c", buf, end);
 1796  } else
 1797  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
 1798 
 1799  fflush(stderr);
 1800  }
 1801 
 1802  if (progress_avio) {
 1803  av_bprintf(&buf_script, "progress=%s\n",
 1804  is_last_report ? "end" : "continue");
 1805  avio_write(progress_avio, buf_script.str,
 1806  FFMIN(buf_script.len, buf_script.size - 1));
 1807  avio_flush(progress_avio);
 1808  av_bprint_finalize(&buf_script, NULL);
 1809  if (is_last_report) {
 1810  if ((ret = avio_closep(&progress_avio)) < 0)
 1812  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
 1813  }
 1814  }
 1815 
 1816  if (is_last_report)
 1817  print_final_stats(total_size);
 1818 }
1819 
/* Drain all encoders at end of input: send a NULL (flush) frame to each
 * active encoder and mux every packet it still produces, until EOF.
 * Streams never initialized (no data arrived) get fake input-filter formats
 * so they can be finished cleanly.
 * NOTE(review): the extraction dropped source lines 1838, 1860, 1883, 1906
 * and 1939 (e.g. the av_log call header, a condition before "continue", the
 * non-audio/video media-type guard, and the vstats condition); this block is
 * not compilable as shown — confirm against the original ffmpeg.c. */
 1820 static void flush_encoders(void)
 1821 {
 1822  int i, ret;
 1823 
 1824  for (i = 0; i < nb_output_streams; i++) {
 1825  OutputStream *ost = output_streams[i];
 1826  AVCodecContext *enc = ost->enc_ctx;
 1827  OutputFile *of = output_files[ost->file_index];
 1828 
 1829  if (!ost->encoding_needed)
 1830  continue;
 1831 
 1832  // Try to enable encoding with no input frames.
 1833  // Maybe we should just let encoding fail instead.
 1834  if (!ost->initialized) {
 1835  FilterGraph *fg = ost->filter->graph;
 1836  char error[1024] = "";
 1837 
 1839  "Finishing stream %d:%d without any data written to it.\n",
 1840  ost->file_index, ost->st->index);
 1841 
 1842  if (ost->filter && !fg->graph) {
 1843  int x;
 1844  for (x = 0; x < fg->nb_inputs; x++) {
 1845  InputFilter *ifilter = fg->inputs[x];
 1846  if (ifilter->format < 0) {
 1847  AVCodecParameters *par = ifilter->ist->st->codecpar;
 1848  // We never got any input. Set a fake format, which will
 1849  // come from libavformat.
 1850  ifilter->format = par->format;
 1851  ifilter->sample_rate = par->sample_rate;
 1852  ifilter->channels = par->channels;
 1853  ifilter->channel_layout = par->channel_layout;
 1854  ifilter->width = par->width;
 1855  ifilter->height = par->height;
 1856  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
 1857  }
 1858  }
 1859 
 1861  continue;
 1862 
 1863  ret = configure_filtergraph(fg);
 1864  if (ret < 0) {
 1865  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
 1866  exit_program(1);
 1867  }
 1868 
 1869  finish_output_stream(ost);
 1870  }
 1871 
 1872  ret = init_output_stream(ost, error, sizeof(error));
 1873  if (ret < 0) {
 1874  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
 1875  ost->file_index, ost->index, error);
 1876  exit_program(1);
 1877  }
 1878  }
 1879 
 1880  /* raw PCM-style audio encoders (frame_size <= 1) have nothing to flush */
 1881  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
 1882  continue;
 1883 
 1884  continue;
 1885 
 1886  for (;;) {
 1887  const char *desc = NULL;
 1888  AVPacket pkt;
 1889  int pkt_size;
 1890 
 1891  switch (enc->codec_type) {
 1892  case AVMEDIA_TYPE_AUDIO:
 1893  desc = "audio";
 1894  break;
 1895  case AVMEDIA_TYPE_VIDEO:
 1896  desc = "video";
 1897  break;
 1898  default:
 1899  av_assert0(0);
 1900  }
 1901 
 1902  av_init_packet(&pkt);
 1903  pkt.data = NULL;
 1904  pkt.size = 0;
 1905 
 1907 
 1908  /* keep feeding NULL frames (flush) while the encoder wants input */
 1909  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
 1910  ret = avcodec_send_frame(enc, NULL);
 1911  if (ret < 0) {
 1912  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
 1913  desc,
 1914  av_err2str(ret));
 1915  exit_program(1);
 1916  }
 1917  }
 1918 
 1919  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
 1920  if (ret < 0 && ret != AVERROR_EOF) {
 1921  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
 1922  desc,
 1923  av_err2str(ret));
 1924  exit_program(1);
 1925  }
 1926  if (ost->logfile && enc->stats_out) {
 1927  fprintf(ost->logfile, "%s", enc->stats_out);
 1928  }
 1929  if (ret == AVERROR_EOF) {
 1930  /* encoder fully drained: emit the EOF packet to the muxer */
 1931  output_packet(of, &pkt, ost, 1);
 1932  break;
 1933  }
 1934  if (ost->finished & MUXER_FINISHED) {
 1935  av_packet_unref(&pkt);
 1936  continue;
 1937  }
 1938  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
 1939  pkt_size = pkt.size;
 1940  output_packet(of, &pkt, ost, 0);
 1941  do_video_stats(ost, pkt_size);
 1942  }
 1943  }
 1944 }
1945 
 1946 /*
 1947  * Check whether a packet from ist should be written into ost at this time
 1948  */
/* Returns 1 if the packet should be written, 0 otherwise.
 * NOTE(review): the listing is missing source line 1949 — presumably the
 * signature (a static int function taking InputStream *ist and OutputStream
 * *ost); confirm against the original ffmpeg.c. */
 1950 {
 1951  OutputFile *of = output_files[ost->file_index];
 1952  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
 1953 
 1954  /* ost must actually be fed from this input stream */
 1955  if (ost->source_index != ist_index)
 1956  return 0;
 1957 
 1958  if (ost->finished)
 1959  return 0;
 1960 
 1961  /* drop packets that precede the requested output start time */
 1962  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
 1963  return 0;
 1964 
 1965  return 1;
 1966 }
1965 
/* Copy one input packet to an output stream without re-encoding:
 * rescale timestamps to the muxer timebase, honour -ss/-t/-copypriorss
 * trimming, and run the legacy parser-based header conversion for a few
 * codecs before handing the packet to output_packet().
 * NOTE(review): the extraction dropped source lines 1983, 2028, 2032 and
 * 2048 (e.g. the copy_initial_nonkeyframes condition, the audio duration
 * initialization, and two continuation arguments); this block is not
 * compilable as shown — confirm against the original ffmpeg.c. */
 1966 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 1967 {
 1968  OutputFile *of = output_files[ost->file_index];
 1969  InputFile *f = input_files [ist->file_index];
 1970  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
 1971  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
 1972  AVPacket opkt = { 0 };
 1973 
 1974  av_init_packet(&opkt);
 1975 
 1976  // EOF: flush output bitstream filters.
 1977  if (!pkt) {
 1978  output_packet(of, &opkt, ost, 1);
 1979  return;
 1980  }
 1981 
 1982  /* normally start the copied stream on a keyframe */
 1983  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 1984  return;
 1985 
 1986  if (!ost->frame_number && !ost->copy_prior_start) {
 1987  int64_t comp_start = start_time;
 1988  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
 1989  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
 1990  if (pkt->pts == AV_NOPTS_VALUE ?
 1991  ist->pts < comp_start :
 1992  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
 1993  return;
 1994  }
 1995 
 1996  /* -t: stop once the output recording time is reached */
 1997  if (of->recording_time != INT64_MAX &&
 1998  ist->pts >= of->recording_time + start_time) {
 1999  close_output_stream(ost);
 2000  return;
 2001  }
 2002 
 2003  if (f->recording_time != INT64_MAX) {
 2004  start_time = f->ctx->start_time;
 2005  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
 2006  start_time += f->start_time;
 2007  if (ist->pts >= f->recording_time + start_time) {
 2008  close_output_stream(ost);
 2009  return;
 2010  }
 2011  }
 2012 
 2013  /* force the input stream PTS */
 2014  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 2015  ost->sync_opts++;
 2016 
 2017  if (pkt->pts != AV_NOPTS_VALUE)
 2018  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
 2019  else
 2020  opkt.pts = AV_NOPTS_VALUE;
 2021 
 2022  if (pkt->dts == AV_NOPTS_VALUE)
 2023  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
 2024  else
 2025  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
 2026  opkt.dts -= ost_tb_start_time;
 2027 
 2028  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
 2029  if(!duration)
 2030  duration = ist->dec_ctx->frame_size;
 2031  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
 2033  ost->mux_timebase) - ost_tb_start_time;
 2034  }
 2035 
 2036  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
 2037 
 2038  opkt.flags = pkt->flags;
 2039  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 2040  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
 2041  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
 2042  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
 2043  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
 2044  ) {
 2045  int ret = av_parser_change(ost->parser, ost->parser_avctx,
 2046  &opkt.data, &opkt.size,
 2047  pkt->data, pkt->size,
 2049  if (ret < 0) {
 2050  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
 2051  av_err2str(ret));
 2052  exit_program(1);
 2053  }
 2054  if (ret) {
 2055  /* parser allocated a new payload: wrap it so it gets freed correctly */
 2056  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
 2057  if (!opkt.buf)
 2058  exit_program(1);
 2059  }
 2060  } else {
 2061  opkt.data = pkt->data;
 2062  opkt.size = pkt->size;
 2063  }
 2064  av_copy_packet_side_data(&opkt, pkt);
 2065 
 2066  output_packet(of, &opkt, ost, 0);
 2067 }
2067 
/* Try to pick a channel layout for an input stream that did not signal one.
 * Returns 1 on success (or if a layout was already set), 0 if no guess could
 * be made (too many channels, or no default layout).
 * NOTE(review): the listing is missing source lines 2068 and 2077 —
 * presumably the signature (a static int function taking InputStream *ist)
 * and the statement that assigns the guessed default layout; confirm against
 * the original ffmpeg.c. */
 2069 {
 2070  AVCodecContext *dec = ist->dec_ctx;
 2071 
 2072  if (!dec->channel_layout) {
 2073  char layout_name[256];
 2074 
 2075  /* -guess_layout_max bounds how many channels we are willing to guess for */
 2076  if (dec->channels > ist->guess_layout_max)
 2077  return 0;
 2078  if (!dec->channel_layout)
 2079  return 0;
 2080  av_get_channel_layout_string(layout_name, sizeof(layout_name),
 2081  dec->channels, dec->channel_layout);
 2082  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
 2083  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
 2084  }
 2085  return 1;
 2086 }
2087 
/* Update the decode error statistics and, with -xerror, abort on decode
 * failures or corrupt decoded frames.
 * NOTE(review): the listing is missing source line 2097 — presumably the
 * inner condition that tests the decoded frame for corruption flags before
 * the fatal log; confirm against the original ffmpeg.c. */
 2088 static void check_decode_result(InputStream *ist, int *got_output, int ret)
 2089 {
 2090  /* decode_error_stat[0] counts successes, [1] counts failures */
 2091  if (*got_output || ret<0)
 2092  decode_error_stat[ret<0] ++;
 2093 
 2094  if (ret < 0 && exit_on_error)
 2095  exit_program(1);
 2096 
 2097  if (exit_on_error && *got_output && ist) {
 2098  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
 2099  exit_program(1);
 2100  }
 2101  }
 2102 }
2103 
 2104 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the filtergraph has a known
 * format, 0 otherwise.
 * NOTE(review): the listing is missing source line 2105 — presumably the
 * signature "static int ifilter_has_all_input_formats(FilterGraph *fg)"
 * (the name is visible at its call site below); confirm against ffmpeg.c. */
 2106 {
 2107  int i;
 2108  for (i = 0; i < fg->nb_inputs; i++) {
 2109  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
 2110  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
 2111  return 0;
 2112  }
 2113  return 1;
 2114 }
2115 
/* Feed one decoded frame into a filtergraph input. If the frame's
 * parameters (format, dimensions, sample rate, layout, hw frames context)
 * changed, or the graph is not configured yet, the graph is (re)configured
 * first; frames arriving before all inputs have known formats are queued.
 * NOTE(review): the listing is missing source lines 2116 and 2182 —
 * presumably the signature (taking InputFilter *ifilter and AVFrame *frame)
 * and the av_buffersrc_add_frame call whose result is tested at the end;
 * confirm against the original ffmpeg.c. */
 2117 {
 2118  FilterGraph *fg = ifilter->graph;
 2119  int need_reinit, ret, i;
 2120 
 2121  /* determine if the parameters for this input changed */
 2122  need_reinit = ifilter->format != frame->format;
 2123  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
 2124  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
 2125  need_reinit = 1;
 2126 
 2127  switch (ifilter->ist->st->codecpar->codec_type) {
 2128  case AVMEDIA_TYPE_AUDIO:
 2129  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
 2130  ifilter->channels != frame->channels ||
 2131  ifilter->channel_layout != frame->channel_layout;
 2132  break;
 2133  case AVMEDIA_TYPE_VIDEO:
 2134  need_reinit |= ifilter->width != frame->width ||
 2135  ifilter->height != frame->height;
 2136  break;
 2137  }
 2138 
 2139  if (need_reinit) {
 2140  ret = ifilter_parameters_from_frame(ifilter, frame);
 2141  if (ret < 0)
 2142  return ret;
 2143  }
 2144 
 2145  /* (re)init the graph if possible, otherwise buffer the frame and return */
 2146  if (need_reinit || !fg->graph) {
 2147  for (i = 0; i < fg->nb_inputs; i++) {
 2148  if (!ifilter_has_all_input_formats(fg)) {
 2149  /* some input formats still unknown: queue a copy of the frame */
 2150  AVFrame *tmp = av_frame_clone(frame);
 2151  if (!tmp)
 2152  return AVERROR(ENOMEM);
 2153  av_frame_unref(frame);
 2154 
 2155  if (!av_fifo_space(ifilter->frame_queue)) {
 2156  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
 2157  if (ret < 0) {
 2158  av_frame_free(&tmp);
 2159  return ret;
 2160  }
 2161  }
 2162  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
 2163  return 0;
 2164  }
 2165  }
 2166 
 2167  /* flush the old graph's pending output before reconfiguring */
 2168  ret = reap_filters(1);
 2169  if (ret < 0 && ret != AVERROR_EOF) {
 2170  char errbuf[128];
 2171  av_strerror(ret, errbuf, sizeof(errbuf));
 2172 
 2173  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
 2174  return ret;
 2175  }
 2176 
 2177  ret = configure_filtergraph(fg);
 2178  if (ret < 0) {
 2179  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
 2180  return ret;
 2181  }
 2182  }
 2183 
 2184  if (ret < 0) {
 2185  if (ret != AVERROR_EOF)
 2186  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2187  return ret;
 2188  }
 2189 
 2190  return 0;
 2191 }
2191 
2192 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2193 {
2194  int i, j, ret;
2195 
2196  ifilter->eof = 1;
2197 
2198  if (ifilter->filter) {
2199  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2200  if (ret < 0)
2201  return ret;
2202  } else {
2203  // the filtergraph was never configured
2204  FilterGraph *fg = ifilter->graph;
2205  for (i = 0; i < fg->nb_inputs; i++)
2206  if (!fg->inputs[i]->eof)
2207  break;
2208  if (i == fg->nb_inputs) {
2209  // All the input streams have finished without the filtergraph
2210  // ever being configured.
2211  // Mark the output streams as finished.
2212  for (j = 0; j < fg->nb_outputs; j++)
2213  finish_output_stream(fg->outputs[j]->ost);
2214  }
2215  }
2216 
2217  return 0;
2218 }
2219 
2220 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2221 // There is the following difference: if you got a frame, you must call
2222 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2223 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2224 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2225 {
2226  int ret;
2227 
2228  *got_frame = 0;
2229 
2230  if (pkt) {
2231  ret = avcodec_send_packet(avctx, pkt);
2232  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2233  // decoded frames with avcodec_receive_frame() until done.
2234  if (ret < 0 && ret != AVERROR_EOF)
2235  return ret;
2236  }
2237 
2238  ret = avcodec_receive_frame(avctx, frame);
2239  if (ret < 0 && ret != AVERROR(EAGAIN))
2240  return ret;
2241  if (ret >= 0)
2242  *got_frame = 1;
2243 
2244  return 0;
2245 }
2246 
/* Push one decoded frame to every filtergraph input fed by this stream.
 * Every filter except the last receives a new reference (via
 * ist->filter_frame); the last one consumes decoded_frame itself.
 * Returns 0 on success or the first negative AVERROR encountered. */
2247 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2248 {
2249  int i, ret;
2250  AVFrame *f;
2251 
2252  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2253  for (i = 0; i < ist->nb_filters; i++) {
2254  if (i < ist->nb_filters - 1) {
2255  f = ist->filter_frame;
2256  ret = av_frame_ref(f, decoded_frame);
2257  if (ret < 0)
2258  break;
2259  } else
2260  f = decoded_frame;
2261  ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from an already-closed filter input is not treated as an error */
2262  if (ret == AVERROR_EOF)
2263  ret = 0; /* ignore */
2264  if (ret < 0) {
/* NOTE(review): listing line 2265 — the opening of the av_log(NULL,
 * AV_LOG_ERROR, ...) call that the format string below belongs to —
 * was dropped by the HTML extraction. */
2266  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2267  break;
2268  }
2269  }
2270  return ret;
2271 }
2272 
/* Decode one audio packet (or drain with pkt == NULL), fix up the frame's
 * timestamp, and hand the frame to the filtergraphs.
 * *got_output is set when a frame was produced; *decode_failed is set when
 * the decoder itself errored (as opposed to downstream processing).
 * Returns the decoder/filter status (negative AVERROR on failure). */
2273 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2274  int *decode_failed)
2275 {
2276  AVFrame *decoded_frame;
2277  AVCodecContext *avctx = ist->dec_ctx;
2278  int ret, err = 0;
2279  AVRational decoded_frame_tb;
2280 
/* Lazily allocate the per-stream scratch frames on first use */
2281  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2282  return AVERROR(ENOMEM);
2283  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2284  return AVERROR(ENOMEM);
2285  decoded_frame = ist->decoded_frame;
2286 
/* NOTE(review): listing line 2287 — presumably the "update_benchmark(NULL);"
 * reset preceding the timed call — was dropped by the HTML extraction. */
2288  ret = decode(avctx, decoded_frame, got_output, pkt);
2289  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2290  if (ret < 0)
2291  *decode_failed = 1;
2292 
/* A non-positive sample rate on a successful decode is treated as bad data */
2293  if (ret >= 0 && avctx->sample_rate <= 0) {
2294  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2295  ret = AVERROR_INVALIDDATA;
2296  }
2297 
2298  if (ret != AVERROR_EOF)
2299  check_decode_result(ist, got_output, ret);
2300 
2301  if (!*got_output || ret < 0)
2302  return ret;
2303 
2304  ist->samples_decoded += decoded_frame->nb_samples;
2305  ist->frames_decoded++;
2306 
2307 #if 1
2308  /* increment next_dts to use for the case where the input stream does not
2309  have timestamps or there are multiple frames in the packet */
2310  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2311  avctx->sample_rate;
2312  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2313  avctx->sample_rate;
2314 #endif
2315 
/* Pick a pts and its time base: frame pts, else packet pts, else stream dts */
2316  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2317  decoded_frame_tb = ist->st->time_base;
2318  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2319  decoded_frame->pts = pkt->pts;
2320  decoded_frame_tb = ist->st->time_base;
2321  }else {
2322  decoded_frame->pts = ist->dts;
2323  decoded_frame_tb = AV_TIME_BASE_Q;
2324  }
/* Rescale to 1/sample_rate, compensating rounding drift across frames */
2325  if (decoded_frame->pts != AV_NOPTS_VALUE)
2326  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2327  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2328  (AVRational){1, avctx->sample_rate});
2329  ist->nb_samples = decoded_frame->nb_samples;
2330  err = send_frame_to_filters(ist, decoded_frame);
2331 
2332  av_frame_unref(ist->filter_frame);
2333  av_frame_unref(decoded_frame);
2334  return err < 0 ? err : ret;
2335 }
2336 
/* Decode one video packet (or drain when eof is set), derive a best-effort
 * timestamp, retrieve hwaccel data if needed, and send the frame to the
 * filtergraphs.  *duration_pts receives the frame's packet duration in
 * stream time base; *got_output / *decode_failed as in decode_audio().
 * Returns the decoder/filter status (negative AVERROR on failure). */
2337 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2338  int *decode_failed)
2339 {
2340  AVFrame *decoded_frame;
2341  int i, ret = 0, err = 0;
2342  int64_t best_effort_timestamp;
2343  int64_t dts = AV_NOPTS_VALUE;
2344  AVPacket avpkt;
2345 
2346  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2347  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2348  // skip the packet.
2349  if (!eof && pkt && pkt->size == 0)
2350  return 0;
2351 
2352  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2353  return AVERROR(ENOMEM);
2354  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2355  return AVERROR(ENOMEM);
2356  decoded_frame = ist->decoded_frame;
2357  if (ist->dts != AV_NOPTS_VALUE)
2358  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2359  if (pkt) {
2360  avpkt = *pkt;
2361  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2362  }
2363 
2364  // The old code used to set dts on the drain packet, which does not work
2365  // with the new API anymore.
2366  if (eof) {
/* Queue the dts so it can serve as a fallback timestamp for the frames
 * still to be drained out of the decoder. */
2367  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2368  if (!new)
2369  return AVERROR(ENOMEM);
2370  ist->dts_buffer = new;
2371  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2372  }
2373 
/* NOTE(review): listing line 2374 — presumably "update_benchmark(NULL);" —
 * was dropped by the HTML extraction. */
2375  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2376  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2377  if (ret < 0)
2378  *decode_failed = 1;
2379 
2380  // The following line may be required in some cases where there is no parser
2381  // or the parser does not has_b_frames correctly
2382  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2383  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2384  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2385  } else
/* NOTE(review): listing line 2386 — the av_log(..., AV_LOG_WARNING, call
 * this message belongs to — was dropped by the HTML extraction. */
2387  "video_delay is larger in decoder than demuxer %d > %d.\n"
2388  "If you want to help, upload a sample "
2389  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2390  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2391  ist->dec_ctx->has_b_frames,
2392  ist->st->codecpar->video_delay);
2393  }
2394 
2395  if (ret != AVERROR_EOF)
2396  check_decode_result(ist, got_output, ret);
2397 
/* Debug aid: report decoder-context vs frame geometry mismatches */
2398  if (*got_output && ret >= 0) {
2399  if (ist->dec_ctx->width != decoded_frame->width ||
2400  ist->dec_ctx->height != decoded_frame->height ||
2401  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2402  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2403  decoded_frame->width,
2404  decoded_frame->height,
2405  decoded_frame->format,
2406  ist->dec_ctx->width,
2407  ist->dec_ctx->height,
2408  ist->dec_ctx->pix_fmt);
2409  }
2410  }
2411 
2412  if (!*got_output || ret < 0)
2413  return ret;
2414 
2415  if(ist->top_field_first>=0)
2416  decoded_frame->top_field_first = ist->top_field_first;
2417 
2418  ist->frames_decoded++;
2419 
/* Download the frame from hw memory when a hwaccel produced it */
2420  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2421  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2422  if (err < 0)
2423  goto fail;
2424  }
2425  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2426 
2427  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2428  *duration_pts = decoded_frame->pkt_duration;
2429 
/* With a forced input framerate, timestamps are simply counted up */
2430  if (ist->framerate.num)
2431  best_effort_timestamp = ist->cfr_next_pts++;
2432 
/* While draining, fall back to the dts values queued at EOF above */
2433  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2434  best_effort_timestamp = ist->dts_buffer[0];
2435 
2436  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2437  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2438  ist->nb_dts_buffer--;
2439  }
2440 
2441  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2442  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2443 
2444  if (ts != AV_NOPTS_VALUE)
2445  ist->next_pts = ist->pts = ts;
2446  }
2447 
2448  if (debug_ts) {
2449  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2450  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2451  ist->st->index, av_ts2str(decoded_frame->pts),
2452  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2453  best_effort_timestamp,
2454  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2455  decoded_frame->key_frame, decoded_frame->pict_type,
2456  ist->st->time_base.num, ist->st->time_base.den);
2457  }
2458 
2459  if (ist->st->sample_aspect_ratio.num)
2460  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2461 
2462  err = send_frame_to_filters(ist, decoded_frame);
2463 
2464 fail:
/* NOTE(review): listing line 2465 — presumably
 * "av_frame_unref(ist->filter_frame);" — was dropped by the extraction. */
2466  av_frame_unref(decoded_frame);
2467  return err < 0 ? err : ret;
2468 }
2469 
/* Decode one subtitle packet and dispatch the result: optionally fix the
 * previous subtitle's duration (-fix_sub_duration), render it for sub2video
 * filtering (immediately or via a queue while the graph is unconfigured),
 * and encode it to every matching subtitle output stream.
 * Returns the decoder status (negative AVERROR on failure). */
2470 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2471  int *decode_failed)
2472 {
2473  AVSubtitle subtitle;
/* subtitle is freed at 'out:' unless ownership moved into the queue */
2474  int free_sub = 1;
2475  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2476  &subtitle, got_output, pkt);
2477 
2478  check_decode_result(NULL, got_output, ret);
2479 
2480  if (ret < 0 || !*got_output) {
2481  *decode_failed = 1;
/* An empty packet is the drain signal: flush any sub2video state */
2482  if (!pkt->size)
2483  sub2video_flush(ist);
2484  return ret;
2485  }
2486 
2487  if (ist->fix_sub_duration) {
2488  int end = 1;
/* Clamp the previous subtitle's display time to the gap before this one */
2489  if (ist->prev_sub.got_output) {
2490  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2491  1000, AV_TIME_BASE);
2492  if (end < ist->prev_sub.subtitle.end_display_time) {
2493  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2494  "Subtitle duration reduced from %"PRId32" to %d%s\n",
/* NOTE(review): listing lines 2495 and 2497 — the remaining av_log
 * arguments and the assignment shortening end_display_time — were
 * dropped by the HTML extraction. */
2496  end <= 0 ? ", dropping it" : "");
2498  }
2499  }
/* Delay output by one subtitle: swap current state with the stored one */
2500  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2501  FFSWAP(int, ret, ist->prev_sub.ret);
2502  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2503  if (end <= 0)
2504  goto out;
2505  }
2506 
2507  if (!*got_output)
2508  return ret;
2509 
2510  if (ist->sub2video.frame) {
2511  sub2video_update(ist, &subtitle);
2512  } else if (ist->nb_filters) {
/* Graph not configured yet: park the subtitle in a FIFO for later */
2513  if (!ist->sub2video.sub_queue)
2514  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2515  if (!ist->sub2video.sub_queue)
2516  exit_program(1);
2517  if (!av_fifo_space(ist->sub2video.sub_queue)) {
/* NOTE(review): listing line 2518 — presumably the av_fifo_realloc2()
 * doubling of the queue — was dropped by the HTML extraction. */
2519  if (ret < 0)
2520  exit_program(1);
2521  }
2522  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2523  free_sub = 0;
2524  }
2525 
2526  if (!subtitle.num_rects)
2527  goto out;
2528 
2529  ist->frames_decoded++;
2530 
2531  for (i = 0; i < nb_output_streams; i++) {
2532  OutputStream *ost = output_streams[i];
2533 
2534  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2535  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2536  continue;
2537 
2538  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2539  }
2540 
2541 out:
2542  if (free_sub)
2543  avsubtitle_free(&subtitle);
2544  return ret;
2545 }
2546 
/* Propagate EOF (with the stream's final pts) to every filtergraph input
 * fed by this input stream.  Returns 0 or the first negative AVERROR.
 * NOTE(review): listing line 2547 — the signature, presumably
 * "static int send_filter_eof(InputStream *ist)" — was dropped by the
 * HTML extraction. */
2548 {
2549  int i, ret;
2550  /* TODO keep pts also in stream time base to avoid converting back */
2551  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
/* NOTE(review): listing line 2552 — the rounding-flags argument, presumably
 * "AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);" — was dropped. */
2553 
2554  for (i = 0; i < ist->nb_filters; i++) {
2555  ret = ifilter_send_eof(ist->filters[i], pts);
2556  if (ret < 0)
2557  return ret;
2558  }
2559  return 0;
2560 }
2561 
2562 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet driver for one input stream: maintains the dts/pts
 * bookkeeping, runs the type-specific decoder (audio/video/subtitle) in a
 * loop while draining, sends filter EOF when the decoder is exhausted
 * (unless no_eof, used for -stream_loop), advances timestamps for pure
 * stream copy, and finally hands the packet to every stream-copy output.
 * Returns 0 once the stream is fully drained, non-zero otherwise. */
2563 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2564 {
2565  int ret = 0, i;
2566  int repeating = 0;
2567  int eof_reached = 0;
2568 
2569  AVPacket avpkt;
/* First packet: seed dts/pts; back up by the decoder delay for B-frames */
2570  if (!ist->saw_first_ts) {
2571  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2572  ist->pts = 0;
2573  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2574  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2575  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2576  }
2577  ist->saw_first_ts = 1;
2578  }
2579 
2580  if (ist->next_dts == AV_NOPTS_VALUE)
2581  ist->next_dts = ist->dts;
2582  if (ist->next_pts == AV_NOPTS_VALUE)
2583  ist->next_pts = ist->pts;
2584 
2585  if (!pkt) {
2586  /* EOF handling */
2587  av_init_packet(&avpkt);
2588  avpkt.data = NULL;
2589  avpkt.size = 0;
2590  } else {
2591  avpkt = *pkt;
2592  }
2593 
2594  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2595  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2596  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2597  ist->next_pts = ist->pts = ist->dts;
2598  }
2599 
2600  // while we have more to decode or while the decoder did output something on EOF
2601  while (ist->decoding_needed) {
2602  int64_t duration_dts = 0;
2603  int64_t duration_pts = 0;
2604  int got_output = 0;
2605  int decode_failed = 0;
2606 
2607  ist->pts = ist->next_pts;
2608  ist->dts = ist->next_dts;
2609 
2610  switch (ist->dec_ctx->codec_type) {
2611  case AVMEDIA_TYPE_AUDIO:
/* repeating: the packet was already sent; call with NULL to drain */
2612  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2613  &decode_failed);
2614  break;
2615  case AVMEDIA_TYPE_VIDEO:
2616  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2617  &decode_failed);
2618  if (!repeating || !pkt || got_output) {
2619  if (pkt && pkt->duration) {
2620  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2621  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* NOTE(review): listing lines 2622 and 2625 — the "int ticks = ..."
 * declaration (parser repeat_pict vs ticks_per_frame) and the
 * "(framerate.num * ticks)" divisor — were dropped by the extraction;
 * compare the intact stream-copy branch further below. */
2623  duration_dts = ((int64_t)AV_TIME_BASE *
2624  ist->dec_ctx->framerate.den * ticks) /
2626  }
2627 
2628  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2629  ist->next_dts += duration_dts;
2630  }else
2631  ist->next_dts = AV_NOPTS_VALUE;
2632  }
2633 
/* Prefer the decoded frame's own duration over the packet estimate */
2634  if (got_output) {
2635  if (duration_pts > 0) {
2636  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2637  } else {
2638  ist->next_pts += duration_dts;
2639  }
2640  }
2641  break;
2642  case AVMEDIA_TYPE_SUBTITLE:
2643  if (repeating)
2644  break;
2645  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2646  if (!pkt && ret >= 0)
2647  ret = AVERROR_EOF;
2648  break;
2649  default:
2650  return -1;
2651  }
2652 
2653  if (ret == AVERROR_EOF) {
2654  eof_reached = 1;
2655  break;
2656  }
2657 
2658  if (ret < 0) {
2659  if (decode_failed) {
2660  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2661  ist->file_index, ist->st->index, av_err2str(ret));
2662  } else {
2663  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2664  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2665  }
2666  if (!decode_failed || exit_on_error)
2667  exit_program(1);
2668  break;
2669  }
2670 
2671  if (got_output)
2672  ist->got_output = 1;
2673 
2674  if (!got_output)
2675  break;
2676 
2677  // During draining, we might get multiple output frames in this loop.
2678  // ffmpeg.c does not drain the filter chain on configuration changes,
2679  // which means if we send multiple frames at once to the filters, and
2680  // one of those frames changes configuration, the buffered frames will
2681  // be lost. This can upset certain FATE tests.
2682  // Decode only 1 frame per call on EOF to appease these FATE tests.
2683  // The ideal solution would be to rewrite decoding to use the new
2684  // decoding API in a better way.
2685  if (!pkt)
2686  break;
2687 
2688  repeating = 1;
2689  }
2690 
2691  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2692  /* except when looping we need to flush but not to send an EOF */
2693  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2694  int ret = send_filter_eof(ist);
2695  if (ret < 0) {
2696  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2697  exit_program(1);
2698  }
2699  }
2700 
2701  /* handle stream copy */
2702  if (!ist->decoding_needed && pkt) {
2703  ist->dts = ist->next_dts;
2704  switch (ist->dec_ctx->codec_type) {
2705  case AVMEDIA_TYPE_AUDIO:
2706  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2707  ist->dec_ctx->sample_rate;
2708  break;
2709  case AVMEDIA_TYPE_VIDEO:
2710  if (ist->framerate.num) {
2711  // TODO: Remove work-around for c99-to-c89 issue 7
2712  AVRational time_base_q = AV_TIME_BASE_Q;
2713  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2714  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2715  } else if (pkt->duration) {
2716  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2717  } else if(ist->dec_ctx->framerate.num != 0) {
2718  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2719  ist->next_dts += ((int64_t)AV_TIME_BASE *
2720  ist->dec_ctx->framerate.den * ticks) /
/* NOTE(review): listing line 2721 — the "(framerate.num * ticks...)"
 * divisor closing this expression — was dropped by the extraction. */
2722  }
2723  break;
2724  }
2725  ist->pts = ist->dts;
2726  ist->next_pts = ist->next_dts;
2727  }
2728  for (i = 0; i < nb_output_streams; i++) {
2729  OutputStream *ost = output_streams[i];
2730 
2731  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2732  continue;
2733 
2734  do_streamcopy(ist, ost, pkt);
2735  }
2736 
2737  return !eof_reached;
2738 }
2739 
/* Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to -sdp_file.  Does nothing until every
 * output file has written its header. */
2740 static void print_sdp(void)
2741 {
2742  char sdp[16384];
2743  int i;
2744  int j;
2745  AVIOContext *sdp_pb;
2746  AVFormatContext **avc;
2747 
2748  for (i = 0; i < nb_output_files; i++) {
2749  if (!output_files[i]->header_written)
2750  return;
2751  }
2752 
2753  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2754  if (!avc)
2755  exit_program(1);
/* Collect only RTP muxer contexts; j counts how many were found */
2756  for (i = 0, j = 0; i < nb_output_files; i++) {
2757  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2758  avc[j] = output_files[i]->ctx;
2759  j++;
2760  }
2761  }
2762 
2763  if (!j)
2764  goto fail;
2765 
2766  av_sdp_create(avc, j, sdp, sizeof(sdp));
2767 
2768  if (!sdp_filename) {
2769  printf("SDP:\n%s\n", sdp);
2770  fflush(stdout);
2771  } else {
2772  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2773  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2774  } else {
2775  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2776  avio_closep(&sdp_pb);
/* NOTE(review): listing line 2777 — presumably
 * "av_freep(&sdp_filename);" — was dropped by the HTML extraction. */
2778  }
2779  }
2780 
2781 fail:
2782  av_freep(&avc);
2783 }
2784 
2785 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt, enum HWAccelID selected_hwaccel_id)
2786 {
2787  int i;
2788  for (i = 0; hwaccels[i].name; i++)
2789  if (hwaccels[i].pix_fmt == pix_fmt &&
2790  (!selected_hwaccel_id || selected_hwaccel_id == HWACCEL_AUTO || hwaccels[i].id == selected_hwaccel_id))
2791  return &hwaccels[i];
2792  return NULL;
2793 }
2794 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats, stop at the first non-hwaccel format, and try to initialize a
 * matching hwaccel for each hw format on the way.  Returns the chosen
 * format, or AV_PIX_FMT_NONE if a requested hwaccel could not be set up.
 * NOTE(review): listing line 2795 — the signature, presumably
 * "static enum AVPixelFormat get_format(AVCodecContext *s, const enum
 * AVPixelFormat *pix_fmts)" — was dropped by the HTML extraction. */
2796 {
2797  InputStream *ist = s->opaque;
2798  const enum AVPixelFormat *p;
2799  int ret;
2800 
2801  for (p = pix_fmts; *p != -1; p++) {
/* NOTE(review): listing line 2802 — presumably the
 * "const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);" used in
 * the flag test below — was dropped by the extraction. */
2803  const HWAccel *hwaccel;
2804 
/* pix_fmts is ordered hw-first; the first software format ends the scan */
2805  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2806  break;
2807 
2808  hwaccel = get_hwaccel(*p, ist->hwaccel_id);
2809  if (!hwaccel ||
2810  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2811  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2812  continue;
2813 
2814  ret = hwaccel->init(s);
2815  if (ret < 0) {
/* Init failure is fatal only if this hwaccel was explicitly requested */
2816  if (ist->hwaccel_id == hwaccel->id) {
/* NOTE(review): listing line 2817 — the av_log(NULL, AV_LOG_FATAL, call
 * opening this message — was dropped by the extraction. */
2818  "%s hwaccel requested for input stream #%d:%d, "
2819  "but cannot be initialized.\n", hwaccel->name,
2820  ist->file_index, ist->st->index);
2821  return AV_PIX_FMT_NONE;
2822  }
2823  continue;
2824  }
2825 
2826  if (ist->hw_frames_ctx) {
/* NOTE(review): listing line 2827 — presumably
 * "s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);" — was dropped. */
2828  if (!s->hw_frames_ctx)
2829  return AV_PIX_FMT_NONE;
2830  }
2831 
2832  ist->active_hwaccel_id = hwaccel->id;
2833  ist->hwaccel_pix_fmt = *p;
2834  break;
2835  }
2836 
2837  return *p;
2838 }
2839 
/* AVCodecContext.get_buffer2 callback: delegate frame-buffer allocation to
 * the active hwaccel when the frame uses its pixel format, otherwise fall
 * back to the libavcodec default allocator.
 * NOTE(review): listing line 2840 — the signature, presumably
 * "static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)" —
 * was dropped by the HTML extraction. */
2841 {
2842  InputStream *ist = s->opaque;
2843 
2844  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2845  return ist->hwaccel_get_buffer(s, frame, flags);
2846 
2847  return avcodec_default_get_buffer2(s, frame, flags);
2848 }
2849 
/* Open the decoder for one input stream (if decoding is needed): install the
 * get_format/get_buffer callbacks, apply decoder options (threads, DVB
 * subtitle EDT, sub_text_format), set up any hw device, then call
 * avcodec_open2().  On failure a message is written into error/error_len and
 * a negative AVERROR is returned. */
2850 static int init_input_stream(int ist_index, char *error, int error_len)
2851 {
2852  int ret;
2853  InputStream *ist = input_streams[ist_index];
2854 
2855  if (ist->decoding_needed) {
2856  AVCodec *codec = ist->dec;
2857  if (!codec) {
2858  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2859  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2860  return AVERROR(EINVAL);
2861  }
2862 
/* opaque lets the callbacks below recover the InputStream from the ctx */
2863  ist->dec_ctx->opaque = ist;
2864  ist->dec_ctx->get_format = get_format;
2865  ist->dec_ctx->get_buffer2 = get_buffer;
2866  ist->dec_ctx->thread_safe_callbacks = 1;
2867 
2868  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2869  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2870  (ist->decoding_needed & DECODING_FOR_OST)) {
2871  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
/* NOTE(review): listing line 2872 — presumably the
 * "if (ist->decoding_needed & DECODING_FOR_FILTER)" guard for the
 * warning below — was dropped by the HTML extraction. */
2873  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2874  }
2875 
2876  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2877 
2878  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2879  * audio, and video decoders such as cuvid or mediacodec */
2880  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2881 
2882  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2883  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2884  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
/* NOTE(review): listing line 2885 — presumably the attached-pic
 * disposition check guarding the "threads"="1" override — was dropped. */
2886  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2887 
2888  ret = hw_device_setup_for_decode(ist);
2889  if (ret < 0) {
2890  snprintf(error, error_len, "Device setup failed for "
2891  "decoder on input stream #%d:%d : %s",
2892  ist->file_index, ist->st->index, av_err2str(ret));
2893  return ret;
2894  }
2895 
2896  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2897  if (ret == AVERROR_EXPERIMENTAL)
2898  abort_codec_experimental(codec, 0);
2899 
2900  snprintf(error, error_len,
2901  "Error while opening decoder for input stream "
2902  "#%d:%d : %s",
2903  ist->file_index, ist->st->index, av_err2str(ret));
2904  return ret;
2905  }
/* NOTE(review): listing line 2906 — presumably
 * "assert_avoptions(ist->decoder_opts);" — was dropped by the extraction. */
2907  }
2908 
2909  ist->next_pts = AV_NOPTS_VALUE;
2910  ist->next_dts = AV_NOPTS_VALUE;
2911 
2912  return 0;
2913 }
2914 
/* Return the input stream feeding this output stream directly (stream copy
 * or 1:1 mapping), or NULL when the output is fed by a filtergraph.
 * NOTE(review): listing line 2915 — the signature, presumably
 * "static InputStream *get_input_stream(OutputStream *ost)" — was dropped
 * by the HTML extraction. */
2916 {
2917  if (ost->source_index >= 0)
2918  return input_streams[ost->source_index];
2919  return NULL;
2920 }
2921 
/**
 * qsort() comparator ordering int64_t values ascending.
 * Returns -1, 0 or 1 — the three-way sign of (*a - *b) computed without
 * risking signed overflow (same contract as FFDIFFSIGN).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2926 
2927 /* open the muxer when all the streams are initialized */
2927 /* open the muxer when all the streams are initialized */
/* Once every stream of the output file is initialized, write the container
 * header, optionally emit the SDP, and flush all packets that were queued
 * per-stream while the muxer was not yet ready.
 * Returns 0 (also when still waiting for streams) or a negative AVERROR. */
2928 static int check_init_output_file(OutputFile *of, int file_index)
2929 {
2930  int ret, i;
2931 
/* Bail out silently while any stream is still uninitialized */
2932  for (i = 0; i < of->ctx->nb_streams; i++) {
2933  OutputStream *ost = output_streams[of->ost_index + i];
2934  if (!ost->initialized)
2935  return 0;
2936  }
2937 
2938  of->ctx->interrupt_callback = int_cb;
2939 
2940  ret = avformat_write_header(of->ctx, &of->opts);
2941  if (ret < 0) {
/* NOTE(review): listing line 2942 — the av_log(NULL, AV_LOG_ERROR, call
 * this message belongs to — was dropped by the HTML extraction. */
2943  "Could not write header for output file #%d "
2944  "(incorrect codec parameters ?): %s\n",
2945  file_index, av_err2str(ret));
2946  return ret;
2947  }
2948  //assert_avoptions(of->opts);
2949  of->header_written = 1;
2950 
2951  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2952 
2953  if (sdp_filename || want_sdp)
2954  print_sdp();
2955 
2956  /* flush the muxing queues */
2957  for (i = 0; i < of->ctx->nb_streams; i++) {
2958  OutputStream *ost = output_streams[of->ost_index + i];
2959 
2960  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2961  if (!av_fifo_size(ost->muxing_queue))
2962  ost->mux_timebase = ost->st->time_base;
2963 
2964  while (av_fifo_size(ost->muxing_queue)) {
2965  AVPacket pkt;
2966  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2967  write_packet(of, &pkt, ost, 1);
2968  }
2969  }
2970 
2971  return 0;
2972 }
2973 
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, av_bsf_init()
 * each element, then copy the final parameters/time base back onto the
 * output stream.  Returns 0 or a negative AVERROR.
 * NOTE(review): listing line 2974 — the signature, presumably
 * "static int init_output_bsfs(OutputStream *ost)" — was dropped by the
 * HTML extraction. */
2975 {
2976  AVBSFContext *ctx;
2977  int i, ret;
2978 
2979  if (!ost->nb_bitstream_filters)
2980  return 0;
2981 
2982  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2983  ctx = ost->bsf_ctx[i];
2984 
/* First filter reads the stream's params; later ones chain off the
 * previous filter's output params */
2985  ret = avcodec_parameters_copy(ctx->par_in,
2986  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2987  if (ret < 0)
2988  return ret;
2989 
2990  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2991 
2992  ret = av_bsf_init(ctx);
2993  if (ret < 0) {
2994  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2995  ost->bsf_ctx[i]->filter->name);
2996  return ret;
2997  }
2998  }
2999 
/* The last filter in the chain defines what the muxer will see */
3000  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3001  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3002  if (ret < 0)
3003  return ret;
3004 
3005  ost->st->time_base = ctx->time_base_out;
3006 
3007  return 0;
3008 }
3009 
/* Set up an output stream in stream-copy mode: copy the codec parameters
 * from the input stream (with possible codec_tag remapping for the target
 * container), carry over frame rate, time base, duration, disposition,
 * side data, aspect ratio, and apply any -aspect / rotation overrides.
 * Returns 0 or a negative AVERROR.
 * NOTE(review): listing line 3010 — the signature, presumably
 * "static int init_output_stream_streamcopy(OutputStream *ost)" — was
 * dropped by the HTML extraction. */
3011 {
3012  OutputFile *of = output_files[ost->file_index];
3013  InputStream *ist = get_input_stream(ost);
3014  AVCodecParameters *par_dst = ost->st->codecpar;
3015  AVCodecParameters *par_src = ost->ref_par;
3016  AVRational sar;
3017  int i, ret;
3018  uint32_t codec_tag = par_dst->codec_tag;
3019 
3020  av_assert0(ist && !ost->filter);
3021 
3022  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3023  if (ret >= 0)
3024  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3025  if (ret < 0) {
/* NOTE(review): listing line 3026 — the av_log(NULL, AV_LOG_FATAL, call
 * this message belongs to — was dropped by the HTML extraction. */
3027  "Error setting up codec context options.\n");
3028  return ret;
3029  }
/* NOTE(review): listing line 3030 — presumably
 * "avcodec_parameters_from_context(par_src, ost->enc_ctx);" which fills
 * par_src used below — was dropped by the extraction. */
3031 
/* Keep the source codec_tag only if the target container can represent it */
3032  if (!codec_tag) {
3033  unsigned int codec_tag_tmp;
3034  if (!of->ctx->oformat->codec_tag ||
3035  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3036  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3037  codec_tag = par_src->codec_tag;
3038  }
3039 
3040  ret = avcodec_parameters_copy(par_dst, par_src);
3041  if (ret < 0)
3042  return ret;
3043 
3044  par_dst->codec_tag = codec_tag;
3045 
3046  if (!ost->frame_rate.num)
3047  ost->frame_rate = ist->framerate;
3048  ost->st->avg_frame_rate = ost->frame_rate;
3049 
/* NOTE(review): listing line 3050 — presumably the
 * avformat_transfer_internal_stream_timing_info() call whose result is
 * checked below — was dropped by the extraction. */
3051  if (ret < 0)
3052  return ret;
3053 
3054  // copy timebase while removing common factors
3055  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
/* NOTE(review): listing line 3056 — the assignment deriving
 * ost->st->time_base from the codec time base — was dropped. */
3057 
3058  // copy estimated duration as a hint to the muxer
3059  if (ost->st->duration <= 0 && ist->st->duration > 0)
3060  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3061 
3062  // copy disposition
3063  ost->st->disposition = ist->st->disposition;
3064 
3065  if (ist->st->nb_side_data) {
3066  for (i = 0; i < ist->st->nb_side_data; i++) {
3067  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3068  uint8_t *dst_data;
3069 
3070  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3071  if (!dst_data)
3072  return AVERROR(ENOMEM);
3073  memcpy(dst_data, sd_src->data, sd_src->size);
3074  }
3075  }
3076 
3077  if (ost->rotate_overridden) {
/* NOTE(review): listing line 3078 — the av_stream_new_side_data() call
 * (AV_PKT_DATA_DISPLAYMATRIX) producing 'sd' below — was dropped. */
3079  sizeof(int32_t) * 9);
3080  if (sd)
3081  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3082  }
3083 
3084  ost->parser = av_parser_init(par_dst->codec_id);
3085  ost->parser_avctx = avcodec_alloc_context3(NULL);
3086  if (!ost->parser_avctx)
3087  return AVERROR(ENOMEM);
3088 
3089  switch (par_dst->codec_type) {
3090  case AVMEDIA_TYPE_AUDIO:
3091  if (audio_volume != 256) {
3092  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3093  exit_program(1);
3094  }
/* MP3/AC3 block_align values from some demuxers are bogus; clear them */
3095  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3096  par_dst->block_align= 0;
3097  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3098  par_dst->block_align= 0;
3099  break;
3100  case AVMEDIA_TYPE_VIDEO:
3101  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3102  sar =
3103  av_mul_q(ost->frame_aspect_ratio,
3104  (AVRational){ par_dst->height, par_dst->width });
3105  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3106  "with stream copy may produce invalid files\n");
3107  }
3108  else if (ist->st->sample_aspect_ratio.num)
3109  sar = ist->st->sample_aspect_ratio;
3110  else
3111  sar = par_src->sample_aspect_ratio;
3112  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3113  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3114  ost->st->r_frame_rate = ist->st->r_frame_rate;
3115  break;
3116  }
3117 
3118  ost->mux_timebase = ist->st->time_base;
3119 
3120  return 0;
3121 }
3122 
/* Write an "encoder" metadata tag ("Lavc<version> <codec>") on the output
 * stream, unless the user already set one or bitexact mode is active (in
 * which case only "Lavc <codec>" without the version is used).
 * NOTE(review): listing line 3123 — the signature, presumably
 * "static void set_encoder_id(OutputFile *of, OutputStream *ost)" — was
 * dropped by the HTML extraction. */
3124 {
3125  AVDictionaryEntry *e;
3126 
3127  uint8_t *encoder_string;
3128  int encoder_string_len;
3129  int format_flags = 0;
3130  int codec_flags = ost->enc_ctx->flags;
3131 
/* A user-supplied encoder tag always wins */
3132  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3133  return;
3134 
/* Evaluate -fflags / -flags strings to detect bitexact requests */
3135  e = av_dict_get(of->opts, "fflags", NULL, 0);
3136  if (e) {
3137  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3138  if (!o)
3139  return;
3140  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3141  }
3142  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3143  if (e) {
3144  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3145  if (!o)
3146  return;
3147  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3148  }
3149 
3150  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3151  encoder_string = av_mallocz(encoder_string_len);
3152  if (!encoder_string)
3153  exit_program(1);
3154 
3155  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3156  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3157  else
3158  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3159  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3160  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
/* NOTE(review): listing line 3161 — the flags argument, presumably
 * "AV_DICT_DONT_STRDUP_VAL);" (dict takes ownership of the string) —
 * was dropped by the HTML extraction. */
3162 }
3163 
/**
 * Parse a comma-separated -force_key_frames specification.
 * Each entry is either a timestamp, or "chapters[+offset]" which expands to
 * one entry per chapter start of the output file.  The resulting array is
 * rescaled to avctx->time_base, sorted, and stored in
 * ost->forced_kf_pts / ost->forced_kf_count.
 */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* number of entries = number of commas + 1 */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        /* NUL-terminate the current entry in place */
        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* grow the array: this one entry is replaced by nb_chapters entries */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                /* NOTE(review): the av_log(...) call opening this message was
                 * lost in extraction -- confirm upstream */
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional "+offset" after "chapters", parsed in AV_TIME_BASE units */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    /* keep timestamps ordered so lookup during encoding is simple */
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts   = pts;
}
3226 
3227 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3228 {
3229  InputStream *ist = get_input_stream(ost);
3230  AVCodecContext *enc_ctx = ost->enc_ctx;
3231  AVFormatContext *oc;
3232 
3233  if (ost->enc_timebase.num > 0) {
3234  enc_ctx->time_base = ost->enc_timebase;
3235  return;
3236  }
3237 
3238  if (ost->enc_timebase.num < 0) {
3239  if (ist) {
3240  enc_ctx->time_base = ist->st->time_base;
3241  return;
3242  }
3243 
3244  oc = output_files[ost->file_index]->ctx;
3245  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3246  }
3247 
3248  enc_ctx->time_base = default_time_base;
3249 }
3250 
3252 {
3253  InputStream *ist = get_input_stream(ost);
3254  AVCodecContext *enc_ctx = ost->enc_ctx;
3256  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3257  int j, ret;
3258 
3259  set_encoder_id(output_files[ost->file_index], ost);
3260 
3261  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3262  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3263  // which have to be filtered out to prevent leaking them to output files.
3264  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3265 
3266  if (ist) {
3267  ost->st->disposition = ist->st->disposition;
3268 
3269  dec_ctx = ist->dec_ctx;
3270 
3271  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3272  } else {
3273  for (j = 0; j < oc->nb_streams; j++) {
3274  AVStream *st = oc->streams[j];
3275  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3276  break;
3277  }
3278  if (j == oc->nb_streams)
3279  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3282  }
3283 
3284  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3285  if (!ost->frame_rate.num)
3287  if (ist && !ost->frame_rate.num)
3288  ost->frame_rate = ist->framerate;
3289  if (ist && !ost->frame_rate.num)
3290  ost->frame_rate = ist->st->r_frame_rate;
3291  if (ist && !ost->frame_rate.num) {
3292  ost->frame_rate = (AVRational){25, 1};
3294  "No information "
3295  "about the input framerate is available. Falling "
3296  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3297  "if you want a different framerate.\n",
3298  ost->file_index, ost->index);
3299  }
3300 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3301  if (ost->enc->supported_framerates && !ost->force_fps) {
3302  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3303  ost->frame_rate = ost->enc->supported_framerates[idx];
3304  }
3305  // reduce frame rate for mpeg4 to be within the spec limits
3306  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3307  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3308  ost->frame_rate.num, ost->frame_rate.den, 65535);
3309  }
3310  }
3311 
3312  switch (enc_ctx->codec_type) {
3313  case AVMEDIA_TYPE_AUDIO:
3315  if (dec_ctx)
3316  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3317  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3321 
3322  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3323  break;
3324 
3325  case AVMEDIA_TYPE_VIDEO:
3327 
3328  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3330  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3332  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3333  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3334  }
3335  for (j = 0; j < ost->forced_kf_count; j++)
3336  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3338  enc_ctx->time_base);
3339 
3340  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3341  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3342  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3343  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3344  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3346 
3347  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3348  if (dec_ctx)
3349  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3350  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3351 
3352  enc_ctx->framerate = ost->frame_rate;
3353 
3354  ost->st->avg_frame_rate = ost->frame_rate;
3355 
3356  if (!dec_ctx ||
3357  enc_ctx->width != dec_ctx->width ||
3358  enc_ctx->height != dec_ctx->height ||
3359  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3361  }
3362 
3363  if (ost->forced_keyframes) {
3364  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3367  if (ret < 0) {
3369  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3370  return ret;
3371  }
3376 
3377  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3378  // parse it only for static kf timings
3379  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3381  }
3382  }
3383  break;
3384  case AVMEDIA_TYPE_SUBTITLE:
3385  enc_ctx->time_base = AV_TIME_BASE_Q;
3386  if (!enc_ctx->width) {
3387  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3388  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3389  }
3390  break;
3391  case AVMEDIA_TYPE_DATA:
3392  break;
3393  default:
3394  abort();
3395  break;
3396  }
3397 
3398  ost->mux_timebase = enc_ctx->time_base;
3399 
3400  return 0;
3401 }
3402 
/* Fully initialize one output stream: configure and open its encoder (or set
 * up streamcopy), copy side data from the input, apply user-specified
 * dispositions, init bitstream filters, and finally try to write the output
 * file header once all of the file's streams are initialized.
 * NOTE(review): several interior lines were dropped by the extraction; gaps
 * are marked below -- confirm against the repository. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the allocation of enc_ctx->subtitle_header was
             * lost in extraction */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): a line was lost here in extraction */
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate when the encoder declares no defaults and the
         * user gave neither -b nor -ab */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* hardware frames context from the filter graph, else device setup */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* NOTE(review): lines lost here in extraction */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* NOTE(review): the call opened here (buffersink frame size) was lost
         * in extraction */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): the call whose result is checked here was lost in
         * extraction */
        if (ret < 0) {
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* mirror any coded side data produced by the encoder onto the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* rotation is applied by the autorotate filter, so zero the
                 * copied display matrix to avoid rotating twice */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* local option table so av_opt_eval_flags can parse the textual
         * disposition list into AV_DISPOSITION_* bits */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3585 
/* Warn, once per stream index, about a packet arriving for a stream that
 * appeared after demuxing started (new stream index above the warn mark). */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* already warned about this (or a higher) stream index */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): the argument supplying the %s media-type string
            * was lost in extraction -- confirm upstream */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3600 
/* One-time setup before the main transcode loop: resolve filter-graph stream
 * mappings, init framerate emulation, open input decoders and (non-filtered)
 * output encoders, discard unused programs, write headers for streamless
 * files, and print the stream mapping. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* for simple (single-input) filtergraphs, record which input stream feeds
     * each output so it can be shown in the mapping dump below */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* on failure, close every encoder opened so far before bailing */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep the program if at least one of its streams is used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            /* report "native" when the implementation name matches the
             * generic codec descriptor name */
            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a line was lost here in extraction (upstream sets the
     * transcode_init_done flag) -- confirm against the repository */

    return 0;
}
3772 
3773 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3774 static int need_output(void)
3775 {
3776  int i;
3777 
3778  for (i = 0; i < nb_output_streams; i++) {
3779  OutputStream *ost = output_streams[i];
3780  OutputFile *of = output_files[ost->file_index];
3781  AVFormatContext *os = output_files[ost->file_index]->ctx;
3782 
3783  if (ost->finished ||
3784  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3785  continue;
3786  if (ost->frame_number >= ost->max_frames) {
3787  int j;
3788  for (j = 0; j < of->ctx->nb_streams; j++)
3789  close_output_stream(output_streams[of->ost_index + j]);
3790  continue;
3791  }
3792 
3793  return 1;
3794  }
3795 
3796  return 0;
3797 }
3798 
3799 /**
3800  * Select the output stream to process.
3801  *
3802  * @return selected output stream, or NULL if none available
3803  */
3805 {
3806  int i;
3807  int64_t opts_min = INT64_MAX;
3808  OutputStream *ost_min = NULL;
3809 
3810  for (i = 0; i < nb_output_streams; i++) {
3811  OutputStream *ost = output_streams[i];
3812  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3813  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3814  AV_TIME_BASE_Q);
3815  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3816  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3817 
3818  if (!ost->initialized && !ost->inputs_done)
3819  return ost;
3820 
3821  if (!ost->finished && opts < opts_min) {
3822  opts_min = opts;
3823  ost_min = ost->unavailable ? NULL : ost;
3824  }
3825  }
3826  return ost_min;
3827 }
3828 
/* Enable (on != 0) or disable terminal echo on stdin; a no-op where termios
 * is not available or stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    /* query failed (e.g. stdin is not a tty): leave settings untouched */
    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3840 
3842 {
3843  int i, ret, key;
3844  static int64_t last_time;
3845  if (received_nb_signals)
3846  return AVERROR_EXIT;
3847  /* read_key() returns 0 on EOF */
3848  if(cur_time - last_time >= 100000 && !run_as_daemon){
3849  key = read_key();
3850  last_time = cur_time;
3851  }else
3852  key = -1;
3853  if (key == 'q')
3854  return AVERROR_EXIT;
3855  if (key == '+') av_log_set_level(av_log_get_level()+10);
3856  if (key == '-') av_log_set_level(av_log_get_level()-10);
3857  if (key == 's') qp_hist ^= 1;
3858  if (key == 'h'){
3859  if (do_hex_dump){
3860  do_hex_dump = do_pkt_dump = 0;
3861  } else if(do_pkt_dump){
3862  do_hex_dump = 1;
3863  } else
3864  do_pkt_dump = 1;
3866  }
3867  if (key == 'c' || key == 'C'){
3868  char buf[4096], target[64], command[256], arg[256] = {0};
3869  double time;
3870  int k, n = 0;
3871  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3872  i = 0;
3873  set_tty_echo(1);
3874  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3875  if (k > 0)
3876  buf[i++] = k;
3877  buf[i] = 0;
3878  set_tty_echo(0);
3879  fprintf(stderr, "\n");
3880  if (k > 0 &&
3881  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3882  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3883  target, time, command, arg);
3884  for (i = 0; i < nb_filtergraphs; i++) {
3885  FilterGraph *fg = filtergraphs[i];
3886  if (fg->graph) {
3887  if (time < 0) {
3888  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3889  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3890  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3891  } else if (key == 'c') {
3892  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3893  ret = AVERROR_PATCHWELCOME;
3894  } else {
3895  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3896  if (ret < 0)
3897  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3898  }
3899  }
3900  }
3901  } else {
3903  "Parse error, at least 3 arguments were expected, "
3904  "only %d given in string '%s'\n", n, buf);
3905  }
3906  }
3907  if (key == 'd' || key == 'D'){
3908  int debug=0;
3909  if(key == 'D') {
3910  debug = input_streams[0]->st->codec->debug<<1;
3911  if(!debug) debug = 1;
3912  while(debug & (FF_DEBUG_DCT_COEFF
3913 #if FF_API_DEBUG_MV
3914  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3915 #endif
3916  )) //unsupported, would just crash
3917  debug += debug;
3918  }else{
3919  char buf[32];
3920  int k = 0;
3921  i = 0;
3922  set_tty_echo(1);
3923  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3924  if (k > 0)
3925  buf[i++] = k;
3926  buf[i] = 0;
3927  set_tty_echo(0);
3928  fprintf(stderr, "\n");
3929  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3930  fprintf(stderr,"error parsing debug value\n");
3931  }
3932  for(i=0;i<nb_input_streams;i++) {
3933  input_streams[i]->st->codec->debug = debug;
3934  }
3935  for(i=0;i<nb_output_streams;i++) {
3936  OutputStream *ost = output_streams[i];
3937  ost->enc_ctx->debug = debug;
3938  }
3939  if(debug) av_log_set_level(AV_LOG_DEBUG);
3940  fprintf(stderr,"debug=%d\n", debug);
3941  }
3942  if (key == '?'){
3943  fprintf(stderr, "key function\n"
3944  "? show this help\n"
3945  "+ increase verbosity\n"
3946  "- decrease verbosity\n"
3947  "c Send command to first matching filter supporting it\n"
3948  "C Send/Queue command to all matching filters\n"
3949  "D cycle through available debug modes\n"
3950  "h dump packets/hex press to cycle through the 3 states\n"
3951  "q quit\n"
3952  "s Show QP histogram\n"
3953  );
3954  }
3955  return 0;
3956 }
3957 
3958 #if HAVE_PTHREADS
/* Per-input-file reader thread: demuxes packets and pushes them into the
 * file's thread message queue until EOF or error, which is then propagated
 * to the consumer via the queue's error state. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* signal EOF/error to the receiving side and stop */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: switch to blocking sends */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* NOTE(review): the av_log(...) call opening this message was lost
             * in extraction */
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
3999 
/* Drain and free each input file's message queue, then join its reader
 * thread. */
static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f || !f->in_thread_queue)
            continue;
        /* NOTE(review): a line was lost here in extraction (upstream signals
         * the send-side error before draining) -- confirm */
        /* unref any packets still queued so the thread can finish */
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}
4019 
4020 static int init_input_threads(void)
4021 {
4022  int i, ret;
4023 
4024  if (nb_input_files == 1)
4025  return 0;
4026 
4027  for (i = 0; i < nb_input_files; i++) {
4028  InputFile *f = input_files[i];
4029 
4030  if (f->ctx->pb ? !f->ctx->pb->seekable :
4031  strcmp(f->ctx->iformat->name, "lavfi"))
4032  f->non_blocking = 1;
4033  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4034  f->thread_queue_size, sizeof(AVPacket));
4035  if (ret < 0)
4036  return ret;
4037 
4038  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4039  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4040  av_thread_message_queue_free(&f->in_thread_queue);
4041  return AVERROR(ret);
4042  }
4043  }
4044  return 0;
4045 }
4046 
/* Receive one packet from the input file's reader-thread queue. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        /* NOTE(review): the continuation line
                                         * carrying the flag operands was lost
                                         * in extraction -- confirm upstream */
}
4053 #endif
4054 
4056 {
4057  if (f->rate_emu) {
4058  int i;
4059  for (i = 0; i < f->nb_streams; i++) {
4060  InputStream *ist = input_streams[f->ist_index + i];
4061  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4062  int64_t now = av_gettime_relative() - ist->start;
4063  if (pts > now)
4064  return AVERROR(EAGAIN);
4065  }
4066  }
4067 
4068 #if HAVE_PTHREADS
4069  if (nb_input_files > 1)
4070  return get_input_packet_mt(f, pkt);
4071 #endif
4072  return av_read_frame(f->ctx, pkt);
4073 }
4074 
4075 static int got_eagain(void)
4076 {
4077  int i;
4078  for (i = 0; i < nb_output_streams; i++)
4079  if (output_streams[i]->unavailable)
4080  return 1;
4081  return 0;
4082 }
4083 
4084 static void reset_eagain(void)
4085 {
4086  int i;
4087  for (i = 0; i < nb_input_files; i++)
4088  input_files[i]->eagain = 0;
4089  for (i = 0; i < nb_output_streams; i++)
4090  output_streams[i]->unavailable = 0;
4091 }
4092 
4093 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4094 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4095  AVRational time_base)
4096 {
4097  int ret;
4098 
4099  if (!*duration) {
4100  *duration = tmp;
4101  return tmp_time_base;
4102  }
4103 
4104  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4105  if (ret < 0) {
4106  *duration = tmp;
4107  return tmp_time_base;
4108  }
4109 
4110  return time_base;
4111 }
4112 
4114 {
4115  InputStream *ist;
4116  AVCodecContext *avctx;
4117  int i, ret, has_audio = 0;
4118  int64_t duration = 0;
4119 
4120  ret = av_seek_frame(is, -1, is->start_time, 0);
4121  if (ret < 0)
4122  return ret;
4123 
4124  for (i = 0; i < ifile->nb_streams; i++) {
4125  ist = input_streams[ifile->ist_index + i];
4126  avctx = ist->dec_ctx;
4127 
4128  // flush decoders
4129  if (ist->decoding_needed) {
4130  process_input_packet(ist, NULL, 1);
4131  avcodec_flush_buffers(avctx);
4132  }
4133 
4134  /* duration is the length of the last frame in a stream
4135  * when audio stream is present we don't care about
4136  * last video frame length because it's not defined exactly */
4137  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4138  has_audio = 1;
4139  }
4140 
4141  for (i = 0; i < ifile->nb_streams; i++) {
4142  ist = input_streams[ifile->ist_index + i];
4143  avctx = ist->dec_ctx;
4144 
4145  if (has_audio) {
4146  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4147  AVRational sample_rate = {1, avctx->sample_rate};
4148 
4149  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4150  } else {
4151  continue;
4152  }
4153  } else {
4154  if (ist->framerate.num) {
4155  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4156  } else if (ist->st->avg_frame_rate.num) {
4157  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4158  } else {
4159  duration = 1;
4160  }
4161  }
4162  if (!ifile->duration)
4163  ifile->time_base = ist->st->time_base;
4164  /* the total duration of the stream, max_pts - min_pts is
4165  * the duration of the stream without the last frame */
4166  duration += ist->max_pts - ist->min_pts;
4167  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4168  ifile->time_base);
4169  }
4170 
4171  if (ifile->loop > 0)
4172  ifile->loop--;
4173 
4174  return ret;
4175 }
4176 
4177 /*
4178  * Return
4179  * - 0 -- one packet was read and processed
4180  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4181  * this function should be called again
4182  * - AVERROR_EOF -- this function should not be called again
4183  */
4184 static int process_input(int file_index)
4185 {
4186  InputFile *ifile = input_files[file_index];
4187  AVFormatContext *is;
4188  InputStream *ist;
4189  AVPacket pkt;
4190  int ret, i, j;
4191  int64_t duration;
4192  int64_t pkt_dts;
4193 
4194  is = ifile->ctx;
4195  ret = get_input_packet(ifile, &pkt);
4196 
4197  if (ret == AVERROR(EAGAIN)) {
4198  ifile->eagain = 1;
4199  return ret;
4200  }
4201  if (ret < 0 && ifile->loop) {
4202  ret = seek_to_start(ifile, is);
4203  if (ret < 0)
4204  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4205  else
4206  ret = get_input_packet(ifile, &pkt);
4207  if (ret == AVERROR(EAGAIN)) {
4208  ifile->eagain = 1;
4209  return ret;
4210  }
4211  }
4212  if (ret < 0) {
4213  if (ret != AVERROR_EOF) {
4214  print_error(is->filename, ret);
4215  if (exit_on_error)
4216  exit_program(1);
4217  }
4218 
4219  for (i = 0; i < ifile->nb_streams; i++) {
4220  ist = input_streams[ifile->ist_index + i];
4221  if (ist->decoding_needed) {
4222  ret = process_input_packet(ist, NULL, 0);
4223  if (ret>0)
4224  return 0;
4225  }
4226 
4227  /* mark all outputs that don't go through lavfi as finished */
4228  for (j = 0; j < nb_output_streams; j++) {
4229  OutputStream *ost = output_streams[j];
4230 
4231  if (ost->source_index == ifile->ist_index + i &&
4232  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4233  finish_output_stream(ost);
4234  }
4235  }
4236 
4237  ifile->eof_reached = 1;
4238  return AVERROR(EAGAIN);
4239  }
4240 
4241  reset_eagain();
4242 
4243  if (do_pkt_dump) {
4245  is->streams[pkt.stream_index]);
4246  }
4247  /* the following test is needed in case new streams appear
4248  dynamically in stream : we ignore them */
4249  if (pkt.stream_index >= ifile->nb_streams) {
4250  report_new_stream(file_index, &pkt);
4251  goto discard_packet;
4252  }
4253 
4254  ist = input_streams[ifile->ist_index + pkt.stream_index];
4255 
4256  ist->data_size += pkt.size;
4257  ist->nb_packets++;
4258 
4259  if (ist->discard)
4260  goto discard_packet;
4261 
4262  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4263  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4264  exit_program(1);
4265  }
4266 
4267  if (debug_ts) {
4268  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4269  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4273  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4274  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4275  av_ts2str(input_files[ist->file_index]->ts_offset),
4276  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4277  }
4278 
4279  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4280  int64_t stime, stime2;
4281  // Correcting starttime based on the enabled streams
4282  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4283  // so we instead do it here as part of discontinuity handling
4284  if ( ist->next_dts == AV_NOPTS_VALUE
4285  && ifile->ts_offset == -is->start_time
4286  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4287  int64_t new_start_time = INT64_MAX;
4288  for (i=0; i<is->nb_streams; i++) {
4289  AVStream *st = is->streams[i];
4290  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4291  continue;
4292  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4293  }
4294  if (new_start_time > is->start_time) {
4295  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4296  ifile->ts_offset = -new_start_time;
4297  }
4298  }
4299 
4300  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4301  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4302  ist->wrap_correction_done = 1;
4303 
4304  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4305  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4306  ist->wrap_correction_done = 0;
4307  }
4308  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4309  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4310  ist->wrap_correction_done = 0;
4311  }
4312  }
4313 
4314  /* add the stream-global side data to the first packet */
4315  if (ist->nb_packets == 1) {
4316  for (i = 0; i < ist->st->nb_side_data; i++) {
4317  AVPacketSideData *src_sd = &ist->st->side_data[i];
4318  uint8_t *dst_data;
4319 
4320  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4321  continue;
4322 
4323  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4324  continue;
4325 
4326  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4327  if (!dst_data)
4328  exit_program(1);
4329 
4330  memcpy(dst_data, src_sd->data, src_sd->size);
4331  }
4332  }
4333 
4334  if (pkt.dts != AV_NOPTS_VALUE)
4335  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4336  if (pkt.pts != AV_NOPTS_VALUE)
4337  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4338 
4339  if (pkt.pts != AV_NOPTS_VALUE)
4340  pkt.pts *= ist->ts_scale;
4341  if (pkt.dts != AV_NOPTS_VALUE)
4342  pkt.dts *= ist->ts_scale;
4343 
4345  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4347  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4348  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4349  int64_t delta = pkt_dts - ifile->last_ts;
4350  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4351  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4352  ifile->ts_offset -= delta;
4354  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4355  delta, ifile->ts_offset);
4356  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4357  if (pkt.pts != AV_NOPTS_VALUE)
4358  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4359  }
4360  }
4361 
4362  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4363  if (pkt.pts != AV_NOPTS_VALUE) {
4364  pkt.pts += duration;
4365  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4366  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4367  }
4368 
4369  if (pkt.dts != AV_NOPTS_VALUE)
4370  pkt.dts += duration;
4371 
4373  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4375  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4376  !copy_ts) {
4377  int64_t delta = pkt_dts - ist->next_dts;
4378  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4379  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4380  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4381  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4382  ifile->ts_offset -= delta;
4384  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4385  delta, ifile->ts_offset);
4386  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4387  if (pkt.pts != AV_NOPTS_VALUE)
4388  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4389  }
4390  } else {
4391  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4392  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4393  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4394  pkt.dts = AV_NOPTS_VALUE;
4395  }
4396  if (pkt.pts != AV_NOPTS_VALUE){
4397  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4398  delta = pkt_pts - ist->next_dts;
4399  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4400  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4401  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4402  pkt.pts = AV_NOPTS_VALUE;
4403  }
4404  }
4405  }
4406  }
4407 
4408  if (pkt.dts != AV_NOPTS_VALUE)
4409  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4410 
4411  if (debug_ts) {
4412  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4414  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4415  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4416  av_ts2str(input_files[ist->file_index]->ts_offset),
4417  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4418  }
4419 
4420  sub2video_heartbeat(ist, pkt.pts);
4421 
4422  process_input_packet(ist, &pkt, 0);
4423 
4424 discard_packet:
4425  av_packet_unref(&pkt);
4426 
4427  return 0;
4428 }
4429 
4430 /**
4431  * Perform a step of transcoding for the specified filter graph.
4432  *
4433  * @param[in] graph filter graph to consider
4434  * @param[out] best_ist input stream where a frame would allow to continue
4435  * @return 0 for success, <0 for error
4436  */
4437 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4438 {
4439  int i, ret;
4440  int nb_requests, nb_requests_max = 0;
4441  InputFilter *ifilter;
4442  InputStream *ist;
4443 
4444  *best_ist = NULL;
4445  ret = avfilter_graph_request_oldest(graph->graph);
4446  if (ret >= 0)
4447  return reap_filters(0);
4448 
4449  if (ret == AVERROR_EOF) {
4450  ret = reap_filters(1);
4451  for (i = 0; i < graph->nb_outputs; i++)
4452  close_output_stream(graph->outputs[i]->ost);
4453  return ret;
4454  }
4455  if (ret != AVERROR(EAGAIN))
4456  return ret;
4457 
4458  for (i = 0; i < graph->nb_inputs; i++) {
4459  ifilter = graph->inputs[i];
4460  ist = ifilter->ist;
4461  if (input_files[ist->file_index]->eagain ||
4462  input_files[ist->file_index]->eof_reached)
4463  continue;
4464  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4465  if (nb_requests > nb_requests_max) {
4466  nb_requests_max = nb_requests;
4467  *best_ist = ist;
4468  }
4469  }
4470 
4471  if (!*best_ist)
4472  for (i = 0; i < graph->nb_outputs; i++)
4473  graph->outputs[i]->ost->unavailable = 1;
4474 
4475  return 0;
4476 }
4477 
4478 /**
4479  * Run a single step of transcoding.
4480  *
4481  * @return 0 for success, <0 for error
4482  */
4483 static int transcode_step(void)
4484 {
4485  OutputStream *ost;
4486  InputStream *ist = NULL;
4487  int ret;
4488 
4489  ost = choose_output();
4490  if (!ost) {
4491  if (got_eagain()) {
4492  reset_eagain();
4493  av_usleep(10000);
4494  return 0;
4495  }
4496  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4497  return AVERROR_EOF;
4498  }
4499 
4500  if (ost->filter && !ost->filter->graph->graph) {
4502  ret = configure_filtergraph(ost->filter->graph);
4503  if (ret < 0) {
4504  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4505  return ret;
4506  }
4507  }
4508  }
4509 
4510  if (ost->filter && ost->filter->graph->graph) {
4511  if (!ost->initialized) {
4512  char error[1024] = {0};
4513  ret = init_output_stream(ost, error, sizeof(error));
4514  if (ret < 0) {
4515  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4516  ost->file_index, ost->index, error);
4517  exit_program(1);
4518  }
4519  }
4520  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4521  return ret;
4522  if (!ist)
4523  return 0;
4524  } else if (ost->filter) {
4525  int i;
4526  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4527  InputFilter *ifilter = ost->filter->graph->inputs[i];
4528  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4529  ist = ifilter->ist;
4530  break;
4531  }
4532  }
4533  if (!ist) {
4534  ost->inputs_done = 1;
4535  return 0;
4536  }
4537  } else {
4538  av_assert0(ost->source_index >= 0);
4539  ist = input_streams[ost->source_index];
4540  }
4541 
4542  ret = process_input(ist->file_index);
4543  if (ret == AVERROR(EAGAIN)) {
4544  if (input_files[ist->file_index]->eagain)
4545  ost->unavailable = 1;
4546  return 0;
4547  }
4548 
4549  if (ret < 0)
4550  return ret == AVERROR_EOF ? 0 : ret;
4551 
4552  return reap_filters(0);
4553 }
4554 
4555 /*
4556  * The following code is the main loop of the file converter
4557  */
4558 static int transcode(void)
4559 {
4560  int ret, i;
4561  AVFormatContext *os;
4562  OutputStream *ost;
4563  InputStream *ist;
4564  int64_t timer_start;
4565  int64_t total_packets_written = 0;
4566 
4567  ret = transcode_init();
4568  if (ret < 0)
4569  goto fail;
4570 
4571  if (stdin_interaction) {
4572  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4573  }
4574 
4575  timer_start = av_gettime_relative();
4576 
4577 #if HAVE_PTHREADS
4578  if ((ret = init_input_threads()) < 0)
4579  goto fail;
4580 #endif
4581 
4582  while (!received_sigterm) {
4583  int64_t cur_time= av_gettime_relative();
4584 
4585  /* if 'q' pressed, exits */
4586  if (stdin_interaction)
4587  if (check_keyboard_interaction(cur_time) < 0)
4588  break;
4589 
4590  /* check if there's any stream where output is still needed */
4591  if (!need_output()) {
4592  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4593  break;
4594  }
4595 
4596  ret = transcode_step();
4597  if (ret < 0 && ret != AVERROR_EOF) {
4598  char errbuf[128];
4599  av_strerror(ret, errbuf, sizeof(errbuf));
4600 
4601  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4602  break;
4603  }
4604 
4605  /* dump report by using the output first video and audio streams */
4606  print_report(0, timer_start, cur_time);
4607  }
4608 #if HAVE_PTHREADS
4609  free_input_threads();
4610 #endif
4611 
4612  /* at the end of stream, we must flush the decoder buffers */
4613  for (i = 0; i < nb_input_streams; i++) {
4614  ist = input_streams[i];
4615  if (!input_files[ist->file_index]->eof_reached) {
4616  process_input_packet(ist, NULL, 0);
4617  }
4618  }
4619  flush_encoders();
4620 
4621  term_exit();
4622 
4623  /* write the trailer if needed and close file */
4624  for (i = 0; i < nb_output_files; i++) {
4625  os = output_files[i]->ctx;
4626  if (!output_files[i]->header_written) {
4628  "Nothing was written into output file %d (%s), because "
4629  "at least one of its streams received no packets.\n",
4630  i, os->filename);
4631  continue;
4632  }
4633  if ((ret = av_write_trailer(os)) < 0) {
4634  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4635  if (exit_on_error)
4636  exit_program(1);
4637  }
4638  }
4639 
4640  /* dump report by using the first video and audio streams */
4641  print_report(1, timer_start, av_gettime_relative());
4642 
4643  /* close each encoder */
4644  for (i = 0; i < nb_output_streams; i++) {
4645  ost = output_streams[i];
4646  if (ost->encoding_needed) {
4647  av_freep(&ost->enc_ctx->stats_in);
4648  }
4649  total_packets_written += ost->packets_written;
4650  }
4651 
4652  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4653  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4654  exit_program(1);
4655  }
4656 
4657  /* close each decoder */
4658  for (i = 0; i < nb_input_streams; i++) {
4659  ist = input_streams[i];
4660  if (ist->decoding_needed) {
4661  avcodec_close(ist->dec_ctx);
4662  if (ist->hwaccel_uninit)
4663  ist->hwaccel_uninit(ist->dec_ctx);
4664  }
4665  }
4666 
4669 
4670  /* finished ! */
4671  ret = 0;
4672 
4673  fail:
4674 #if HAVE_PTHREADS
4675  free_input_threads();
4676 #endif
4677 
4678  if (output_streams) {
4679  for (i = 0; i < nb_output_streams; i++) {
4680  ost = output_streams[i];
4681  if (ost) {
4682  if (ost->logfile) {
4683  if (fclose(ost->logfile))
4685  "Error closing logfile, loss of information possible: %s\n",
4686  av_err2str(AVERROR(errno)));
4687  ost->logfile = NULL;
4688  }
4689  av_freep(&ost->forced_kf_pts);
4690