FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
/* Program identity consumed by the cmdutils helpers (banner/version output). */
113 const char program_name[] = "ffmpeg";
114 const int program_birth_year = 2000;
115 
/* Handle for -vstats output; opened on demand, closed in ffmpeg_cleanup(). */
116 static FILE *vstats_file;
117 
/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
118 const char *const forced_keyframes_const_names[] = {
119  "n",
120  "n_forced",
121  "prev_forced_n",
122  "prev_forced_t",
123  "t",
124  NULL
125 };
125 
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
/* NOTE(review): a declaration was lost from the extracted text here
   (original line 129); restore it from the original ffmpeg.c. */
130 
/* Counters/flags shared across the transcoding loop. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
/* Threshold at which the next "More than %d frames duplicated" warning
   fires; multiplied by 10 each time it triggers (see do_video_out). */
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
136 
137 static int want_sdp = 1;
138 
/* Timestamp of the last update_benchmark() call, in the units returned by
 * getutime().  Must be 64-bit: update_benchmark() stores the int64_t result
 * of getutime() here and computes a 64-bit difference from it; a plain int
 * would silently truncate. */
static int64_t current_time;
/* NOTE(review): several global definitions were lost in extraction below --
   only the bare original line labels (141-156) remain.  Recover these
   declarations from the original ffmpeg.c before building. */
141 
143 
148 
153 
156 
157 #if HAVE_TERMIOS_H
158 
159 /* init terminal so that we can grab keys */
/* Saved terminal state; restored by term_exit_sigsafe() when restore_tty is set. */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163 
164 #if HAVE_PTHREADS
/* Forward declaration; the implementation is not visible in this chunk. */
165 static void free_input_threads(void);
166 #endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
/* sub2video_get_blank_frame: reset ist->sub2video.frame to a freshly
   allocated, zeroed canvas, sized from the decoder when it reports
   dimensions, else from the sub2video defaults.  Returns 0 on success or a
   negative AVERROR from av_frame_get_buffer().
   NOTE(review): the function's signature line (orig 173) and one statement
   (orig 181, presumably setting the frame's pixel format) were lost in
   extraction -- restore from the original ffmpeg.c. */
174 {
175  int ret;
176  AVFrame *frame = ist->sub2video.frame;
177 
178  av_frame_unref(frame);
179  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
182  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183  return ret;
/* Zero the first plane so the canvas starts blank -- assumes a packed
   single-plane format (TODO confirm against the lost format assignment). */
184  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
/* Push the current sub2video canvas, stamped with 'pts', into every filter
   fed by this input stream, and remember pts as the last pushed timestamp.
   NOTE(review): the for-loop body (orig lines 227-229, presumably the
   av_buffersrc push call) was lost in extraction. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 {
221  AVFrame *frame = ist->sub2video.frame;
222  int i;
223 
/* The canvas must already have been allocated (sub2video_get_blank_frame). */
224  av_assert1(frame->data[0]);
225  ist->sub2video.last_pts = frame->pts = pts;
226  for (i = 0; i < ist->nb_filters; i++)
230 }
231 
/* sub2video_update: render subtitle 'sub' onto a fresh blank canvas and push
   it to the filters; with sub == NULL, push an empty canvas valid until
   INT64_MAX (used by the heartbeat/flush paths).
   NOTE(review): the signature line (orig 232) and the head of the av_log()
   call (orig 254) were lost in extraction.  Also, 'dst' is declared int8_t*
   here but passed where sub2video_copy_rect() expects uint8_t* -- likely an
   extraction artifact (upstream uses uint8_t); confirm. */
233 {
234  AVFrame *frame = ist->sub2video.frame;
235  int8_t *dst;
236  int dst_linesize;
237  int num_rects, i;
238  int64_t pts, end_pts;
239 
240  if (!frame)
241  return;
242  if (sub) {
/* Convert the display window (ms offsets on sub->pts) into stream time base. */
243  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244  AV_TIME_BASE_Q, ist->st->time_base);
245  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  num_rects = sub->num_rects;
248  } else {
249  pts = ist->sub2video.end_pts;
250  end_pts = INT64_MAX;
251  num_rects = 0;
252  }
253  if (sub2video_get_blank_frame(ist) < 0) {
255  "Impossible to get a blank canvas.\n");
256  return;
257  }
258  dst = frame->data [0];
259  dst_linesize = frame->linesize[0];
260  for (i = 0; i < num_rects; i++)
261  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262  sub2video_push_ref(ist, pts);
263  ist->sub2video.end_pts = end_pts;
264 }
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
/* At EOF on this input: push one final empty canvas if a subtitle was still
   showing, then signal end-of-stream to each attached filter.
   NOTE(review): the loop body (orig line 302, presumably the buffersrc EOF
   push) was lost in extraction. */
295 static void sub2video_flush(InputStream *ist)
296 {
297  int i;
298 
299  if (ist->sub2video.end_pts < INT64_MAX)
300  sub2video_update(ist, NULL);
301  for (i = 0; i < ist->nb_filters; i++)
303 }
304 
305 /* end of sub2video hack */
306 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init().  May be called from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
314 
/* Full terminal teardown: silence logging, then restore the tty.
   NOTE(review): one statement (orig line 318, presumably the call to
   term_exit_sigsafe()) was lost in extraction. */
315 void term_exit(void)
316 {
317  av_log(NULL, AV_LOG_QUIET, "%s", "");
319 }
320 
/* Signal state shared with the handlers below; written from signal context,
   hence volatile. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
/* NOTE(review): a declaration was lost here (orig line 323; presumably the
   atomic transcode_init_done flag read in ffmpeg_cleanup()). */
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
326 
/* POSIX signal handler: record the signal so the main loop can terminate,
   and hard-exit after repeated signals so a wedged process can still be
   killed from the keyboard.
   NOTE(review): the parameter list (orig 328, i.e. "sigterm_handler(int
   sig)") and two statements (orig 331-332, presumably incrementing
   received_nb_signals and calling term_exit_sigsafe()) were lost in
   extraction. */
327 static void
329 {
330  received_sigterm = sig;
333  if(received_nb_signals > 3) {
/* write() is async-signal-safe; printf/av_log are not. */
334  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335  strlen("Received > 3 system signals, hard exiting\n"));
336 
337  exit(123);
338  }
339 }
340 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the same
 * SIGINT/SIGTERM path used on POSIX.  For close/logoff/shutdown events the
 * process is hard-terminated as soon as this returns, so spin until the
 * main thread has finished cleaning up. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
372 
/* Install terminal and signal handling: put the tty into a raw-ish mode so
   single keypresses can be read (see read_key()), and route fatal signals
   through sigterm_handler().
   NOTE(review): the guard opening the termios block (orig line 376,
   presumably "if (!run_as_daemon && stdin_interaction) {" -- confirm) was
   lost in extraction; its closing brace survives below. */
373 void term_init(void)
374 {
375 #if HAVE_TERMIOS_H
377  struct termios tty;
378  if (tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
/* Disable input translation/flow control, echo and canonical mode so key
   presses arrive immediately and unmodified. */
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
407 /* read a key without blocking */
408 static int read_key(void)
409 {
410  unsigned char ch;
411 #if HAVE_TERMIOS_H
412  int n = 1;
413  struct timeval tv;
414  fd_set rfds;
415 
416  FD_ZERO(&rfds);
417  FD_SET(0, &rfds);
418  tv.tv_sec = 0;
419  tv.tv_usec = 0;
420  n = select(1, &rfds, NULL, NULL, &tv);
421  if (n > 0) {
422  n = read(0, &ch, 1);
423  if (n == 1)
424  return ch;
425 
426  return n;
427  }
428 #elif HAVE_KBHIT
429 # if HAVE_PEEKNAMEDPIPE
430  static int is_pipe;
431  static HANDLE input_handle;
432  DWORD dw, nchars;
433  if(!input_handle){
434  input_handle = GetStdHandle(STD_INPUT_HANDLE);
435  is_pipe = !GetConsoleMode(input_handle, &dw);
436  }
437 
438  if (is_pipe) {
439  /* When running under a GUI, you will end here. */
440  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441  // input pipe may have been closed by the program that ran ffmpeg
442  return -1;
443  }
444  //Read it
445  if(nchars != 0) {
446  read(0, &ch, 1);
447  return ch;
448  }else{
449  return -1;
450  }
451  }
452 # endif
453  if(kbhit())
454  return(getch());
455 #endif
456  return -1;
457 }
458 
/* Interrupt callback for blocking libav I/O -- presumably installed as the
   AVIOInterruptCallback (confirm against the original file); a non-zero
   return aborts the operation.
   NOTE(review): the return statement (orig line 461) was lost in
   extraction; as written the function falls off the end, which is
   undefined behaviour when the result is used. */
459 static int decode_interrupt_cb(void *ctx)
460 {
462 }
463 
465 
/* ffmpeg_cleanup: global teardown run at program exit; frees filter graphs,
   closes all output and input files/streams, closes the vstats file, frees
   the global tables and reports why the run ended.  'ret' is the process
   exit status being propagated.
   NOTE(review): many individual lines inside this function were lost in
   extraction (orig 477, 489, 493, 495, 514, 519, 525, 540, 545, 548, 552,
   557-558, 566, 581-582, 584-585, 590, 597, 601, 610) -- mostly frees,
   declarations and av_log/av_fifo call heads.  Restore from the original
   ffmpeg.c before compiling. */
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
/* Drain and free every frame/subtitle still queued toward the filtergraphs,
   then free the graph descriptions themselves. */
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
478  for (j = 0; j < fg->nb_inputs; j++) {
479  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480  AVFrame *frame;
481  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482  sizeof(frame), NULL);
483  av_frame_free(&frame);
484  }
485  av_fifo_freep(&fg->inputs[j]->frame_queue);
486  if (fg->inputs[j]->ist->sub2video.sub_queue) {
487  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488  AVSubtitle sub;
490  &sub, sizeof(sub), NULL);
491  avsubtitle_free(&sub);
492  }
494  }
496  av_freep(&fg->inputs[j]->name);
497  av_freep(&fg->inputs[j]);
498  }
499  av_freep(&fg->inputs);
500  for (j = 0; j < fg->nb_outputs; j++) {
501  av_freep(&fg->outputs[j]->name);
502  av_freep(&fg->outputs[j]->formats);
503  av_freep(&fg->outputs[j]->channel_layouts);
504  av_freep(&fg->outputs[j]->sample_rates);
505  av_freep(&fg->outputs[j]);
506  }
507  av_freep(&fg->outputs);
508  av_freep(&fg->graph_desc);
509 
510  av_freep(&filtergraphs[i]);
511  }
512  av_freep(&filtergraphs);
513 
515 
516  /* close files */
517  for (i = 0; i < nb_output_files; i++) {
518  OutputFile *of = output_files[i];
520  if (!of)
521  continue;
522  s = of->ctx;
523  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524  avio_closep(&s->pb);
526  av_dict_free(&of->opts);
527 
528  av_freep(&output_files[i]);
529  }
530  for (i = 0; i < nb_output_streams; i++) {
531  OutputStream *ost = output_streams[i];
532 
533  if (!ost)
534  continue;
535 
536  for (j = 0; j < ost->nb_bitstream_filters; j++)
537  av_bsf_free(&ost->bsf_ctx[j]);
538  av_freep(&ost->bsf_ctx);
539 
541  av_frame_free(&ost->last_frame);
542  av_dict_free(&ost->encoder_opts);
543 
544  av_parser_close(ost->parser);
546 
547  av_freep(&ost->forced_keyframes);
549  av_freep(&ost->avfilter);
550  av_freep(&ost->logfile_prefix);
551 
553  ost->audio_channels_mapped = 0;
554 
555  av_dict_free(&ost->sws_dict);
556 
559 
/* Drop any packets still waiting for the muxer header. */
560  if (ost->muxing_queue) {
561  while (av_fifo_size(ost->muxing_queue)) {
562  AVPacket pkt;
563  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564  av_packet_unref(&pkt);
565  }
567  }
568 
569  av_freep(&output_streams[i]);
570  }
571 #if HAVE_PTHREADS
572  free_input_threads();
573 #endif
574  for (i = 0; i < nb_input_files; i++) {
575  avformat_close_input(&input_files[i]->ctx);
576  av_freep(&input_files[i]);
577  }
578  for (i = 0; i < nb_input_streams; i++) {
579  InputStream *ist = input_streams[i];
580 
583  av_dict_free(&ist->decoder_opts);
586  av_freep(&ist->filters);
587  av_freep(&ist->hwaccel_device);
588  av_freep(&ist->dts_buffer);
589 
591 
592  av_freep(&input_streams[i]);
593  }
594 
595  if (vstats_file) {
596  if (fclose(vstats_file))
598  "Error closing vstats file, loss of information possible: %s\n",
599  av_err2str(AVERROR(errno)));
600  }
602 
603  av_freep(&input_streams);
604  av_freep(&input_files);
605  av_freep(&output_streams);
606  av_freep(&output_files);
607 
608  uninit_opts();
609 
611 
612  if (received_sigterm) {
613  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614  (int) received_sigterm);
615  } else if (ret && atomic_load(&transcode_init_done)) {
616  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
617  }
618  term_exit();
619  ffmpeg_exited = 1;
620 }
621 
/* remove_avoptions: delete from dictionary *a every key present in b (used
   to strip options that a library has already consumed).
   NOTE(review): the signature line (orig 622, presumably "static void
   remove_avoptions(AVDictionary **a, AVDictionary *b)" -- confirm) and the
   loop body (orig 627, presumably an av_dict_set(a, t->key, NULL, ...)
   call) were lost in extraction. */
623 {
624  AVDictionaryEntry *t = NULL;
625 
626  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628  }
629 }
630 
/* assert_avoptions: abort the program if any option left in m was not
   consumed (i.e. the user passed an unknown option).
   NOTE(review): the signature line (orig 631) and the declaration of 't'
   (orig 633, presumably "AVDictionaryEntry *t;") were lost in
   extraction. */
632 {
634  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
636  exit_program(1);
637  }
638 }
639 
/* Terminate the program after an experimental codec was selected without
   the required opt-in.  Both parameters are unused in this body. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
641 {
642  exit_program(1);
643 }
644 
645 static void update_benchmark(const char *fmt, ...)
646 {
647  if (do_benchmark_all) {
648  int64_t t = getutime();
649  va_list va;
650  char buf[1024];
651 
652  if (fmt) {
653  va_start(va, fmt);
654  vsnprintf(buf, sizeof(buf), fmt, va);
655  va_end(va);
656  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
657  }
658  current_time = t;
659  }
660 }
661 
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 {
664  int i;
665  for (i = 0; i < nb_output_streams; i++) {
666  OutputStream *ost2 = output_streams[i];
667  ost2->finished |= ost == ost2 ? this_stream : others;
668  }
669 }
670 
/* write_packet: hand one packet to the muxer for 'ost'.  Enforces the
   -frames limit, buffers packets in ost->muxing_queue until the muxer
   header has been written, extracts video quality/error side data,
   rescales timestamps to the stream time base and repairs invalid or
   non-monotonous DTS before calling av_interleaved_write_frame().
   'unqueue' marks packets replayed from the muxing queue so they are not
   counted against the frame limit twice.
   NOTE(review): several lines were lost in extraction (orig 700 an av_log
   head, 717-718, 723 the side-data declaration, 757 the "if (" head of the
   monotonicity check, 790, 801) -- restore from the original ffmpeg.c. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
672 {
673  AVFormatContext *s = of->ctx;
674  AVStream *st = ost->st;
675  int ret;
676 
677  /*
678  * Audio encoders may split the packets -- #frames in != #packets out.
679  * But there is no reordering, so we can limit the number of output packets
680  * by simply dropping them here.
681  * Counting encoded video frames needs to be done separately because of
682  * reordering, see do_video_out().
683  * Do not count the packet when unqueued because it has been counted when queued.
684  */
685  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686  if (ost->frame_number >= ost->max_frames) {
687  av_packet_unref(pkt);
688  return;
689  }
690  ost->frame_number++;
691  }
692 
693  if (!of->header_written) {
694  AVPacket tmp_pkt = {0};
695  /* the muxer is not initialized yet, buffer the packet */
696  if (!av_fifo_space(ost->muxing_queue)) {
697  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698  ost->max_muxing_queue_size);
699  if (new_size <= av_fifo_size(ost->muxing_queue)) {
701  "Too many packets buffered for output stream %d:%d.\n",
702  ost->file_index, ost->st->index);
703  exit_program(1);
704  }
705  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
706  if (ret < 0)
707  exit_program(1);
708  }
709  ret = av_packet_ref(&tmp_pkt, pkt);
710  if (ret < 0)
711  exit_program(1);
712  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713  av_packet_unref(pkt);
714  return;
715  }
716 
719  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
720 
/* For video: pull encoder quality/picture-type/error side data off the
   packet, and force a CFR duration when the stream is constant-rate. */
721  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
722  int i;
724  NULL);
725  ost->quality = sd ? AV_RL32(sd) : -1;
726  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
727 
728  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
729  if (sd && i < sd[5])
730  ost->error[i] = AV_RL64(sd + 8 + 8*i);
731  else
732  ost->error[i] = -1;
733  }
734 
735  if (ost->frame_rate.num && ost->is_cfr) {
736  if (pkt->duration > 0)
737  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
739  ost->mux_timebase);
740  }
741  }
742 
743  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
744 
745  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746  if (pkt->dts != AV_NOPTS_VALUE &&
747  pkt->pts != AV_NOPTS_VALUE &&
748  pkt->dts > pkt->pts) {
749  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
750  pkt->dts, pkt->pts,
751  ost->file_index, ost->st->index);
/* Replace both timestamps by the median of {pts, dts, last_dts+1}. */
752  pkt->pts =
753  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
756  }
758  pkt->dts != AV_NOPTS_VALUE &&
759  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760  ost->last_mux_dts != AV_NOPTS_VALUE) {
761  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762  if (pkt->dts < max) {
763  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764  av_log(s, loglevel, "Non-monotonous DTS in output stream "
765  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
767  if (exit_on_error) {
768  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
769  exit_program(1);
770  }
771  av_log(s, loglevel, "changing to %"PRId64". This may result "
772  "in incorrect timestamps in the output file.\n",
773  max);
774  if (pkt->pts >= pkt->dts)
775  pkt->pts = FFMAX(pkt->pts, max);
776  pkt->dts = max;
777  }
778  }
779  }
780  ost->last_mux_dts = pkt->dts;
781 
782  ost->data_size += pkt->size;
783  ost->packets_written++;
784 
785  pkt->stream_index = ost->index;
786 
787  if (debug_ts) {
788  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
791  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793  pkt->size
794  );
795  }
796 
797  ret = av_interleaved_write_frame(s, pkt);
798  if (ret < 0) {
799  print_error("av_interleaved_write_frame()", ret);
800  main_return_code = 1;
802  }
803  av_packet_unref(pkt);
804 }
805 
/* Mark this stream's encoder finished; under -shortest, clamp the output
   file's recording time to this stream's end so the other streams stop at
   the same point.
   NOTE(review): the signature line (orig 806, presumably "static void
   close_output_stream(OutputStream *ost)" -- confirm) was lost in
   extraction. */
807 {
808  OutputFile *of = output_files[ost->file_index];
809 
810  ost->finished |= ENCODER_FINISHED;
811  if (of->shortest) {
812  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813  of->recording_time = FFMIN(of->recording_time, end);
814  }
815 }
816 
817 /*
818  * Send a single packet to the output, applying any bitstream filters
819  * associated with the output stream. This may result in any number
820  * of packets actually being written, depending on what bitstream
821  * filters are applied. The supplied packet is consumed and will be
822  * blank (as if newly-allocated) when this function returns.
823  *
824  * If eof is set, instead indicate EOF to all bitstream filters and
825  * therefore flush any delayed packets to the output. A blank packet
826  * must be supplied in this case.
827  */
/* output_packet: push one packet through the stream's bitstream-filter
   chain (if any) and on to write_packet(); with eof set, flush the chain
   instead (see the comment block above).
   NOTE(review): the first line of the signature (orig 828, presumably
   "static void output_packet(OutputFile *of, AVPacket *pkt," -- confirm)
   was lost in extraction. */
829  OutputStream *ost, int eof)
830 {
831  int ret = 0;
832 
833  /* apply the output bitstream filters, if any */
834  if (ost->nb_bitstream_filters) {
835  int idx;
836 
837  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
838  if (ret < 0)
839  goto finish;
840 
841  eof = 0;
/* idx walks the filter chain; position 0 has been fed above, so start
   draining from filter idx-1 and feed the next one down the chain. */
842  idx = 1;
843  while (idx) {
844  /* get a packet from the previous filter up the chain */
845  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846  if (ret == AVERROR(EAGAIN)) {
847  ret = 0;
848  idx--;
849  continue;
850  } else if (ret == AVERROR_EOF) {
851  eof = 1;
852  } else if (ret < 0)
853  goto finish;
854 
855  /* send it to the next filter down the chain or to the muxer */
856  if (idx < ost->nb_bitstream_filters) {
857  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
858  if (ret < 0)
859  goto finish;
860  idx++;
861  eof = 0;
862  } else if (eof)
863  goto finish;
864  else
865  write_packet(of, pkt, ost, 0);
866  }
867  } else if (!eof)
868  write_packet(of, pkt, ost, 0);
869 
870 finish:
871  if (ret < 0 && ret != AVERROR_EOF) {
872  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
874  if(exit_on_error)
875  exit_program(1);
876  }
877 }
878 
/* Return 1 while output on 'ost' may continue, 0 once the per-file
   recording limit (-t) is reached, in which case the stream is closed via
   close_output_stream().
   NOTE(review): the signature line (orig 879, presumably "static int
   check_recording_time(OutputStream *ost)" -- confirm) and the first half
   of the av_compare_ts() condition (orig 884) were lost in extraction. */
880 {
881  OutputFile *of = output_files[ost->file_index];
882 
883  if (of->recording_time != INT64_MAX &&
885  AV_TIME_BASE_Q) >= 0) {
886  close_output_stream(ost);
887  return 0;
888  }
889  return 1;
890 }
891 
/* do_audio_out: send one filtered audio frame to the encoder and forward
   every packet it produces to the muxer via output_packet().  Advances
   ost->sync_opts by the frame's sample count and updates the encode
   statistics; any encoder error is fatal.
   NOTE(review): one line was lost in extraction (orig 913, between the
   assert and the debug_ts block). */
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
893  AVFrame *frame)
894 {
895  AVCodecContext *enc = ost->enc_ctx;
896  AVPacket pkt;
897  int ret;
898 
899  av_init_packet(&pkt);
900  pkt.data = NULL;
901  pkt.size = 0;
902 
903  if (!check_recording_time(ost))
904  return;
905 
/* Without a usable pts (or with sync disabled) just continue the output
   timeline from the previous frame. */
906  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907  frame->pts = ost->sync_opts;
908  ost->sync_opts = frame->pts + frame->nb_samples;
909  ost->samples_encoded += frame->nb_samples;
910  ost->frames_encoded++;
911 
912  av_assert0(pkt.size || !pkt.data);
914  if (debug_ts) {
915  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918  enc->time_base.num, enc->time_base.den);
919  }
920 
921  ret = avcodec_send_frame(enc, frame);
922  if (ret < 0)
923  goto error;
924 
/* Drain all packets the encoder has ready; EAGAIN means it needs more
   input, anything else negative is an error. */
925  while (1) {
926  ret = avcodec_receive_packet(enc, &pkt);
927  if (ret == AVERROR(EAGAIN))
928  break;
929  if (ret < 0)
930  goto error;
931 
932  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
933 
934  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
935 
936  if (debug_ts) {
937  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
941  }
942 
943  output_packet(of, &pkt, ost, 0);
944  }
945 
946  return;
947 error:
948  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
949  exit_program(1);
950 }
951 
/* do_subtitle_out: encode one AVSubtitle and emit the resulting packet(s).
   DVB subtitles are encoded twice (draw + clear, see the note below);
   timestamps are shifted by the output file's start time so -ss/-t work.
   NOTE(review): the condition selecting the two-packet path (orig line 982,
   presumably "if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)" -- confirm)
   was lost in extraction. */
952 static void do_subtitle_out(OutputFile *of,
953  OutputStream *ost,
954  AVSubtitle *sub)
955 {
956  int subtitle_out_max_size = 1024 * 1024;
957  int subtitle_out_size, nb, i;
958  AVCodecContext *enc;
959  AVPacket pkt;
960  int64_t pts;
961 
962  if (sub->pts == AV_NOPTS_VALUE) {
963  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
964  if (exit_on_error)
965  exit_program(1);
966  return;
967  }
968 
969  enc = ost->enc_ctx;
970 
/* Lazily allocate the shared encode buffer on first use. */
971  if (!subtitle_out) {
972  subtitle_out = av_malloc(subtitle_out_max_size);
973  if (!subtitle_out) {
974  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
975  exit_program(1);
976  }
977  }
978 
979  /* Note: DVB subtitle need one packet to draw them and one other
980  packet to clear them */
981  /* XXX: signal it in the codec context ? */
983  nb = 2;
984  else
985  nb = 1;
986 
987  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
988  pts = sub->pts;
989  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990  pts -= output_files[ost->file_index]->start_time;
991  for (i = 0; i < nb; i++) {
992  unsigned save_num_rects = sub->num_rects;
993 
994  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995  if (!check_recording_time(ost))
996  return;
997 
998  sub->pts = pts;
999  // start_display_time is required to be 0
1000  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001  sub->end_display_time -= sub->start_display_time;
1002  sub->start_display_time = 0;
/* The second (clear) pass encodes zero rectangles. */
1003  if (i == 1)
1004  sub->num_rects = 0;
1005 
1006  ost->frames_encoded++;
1007 
1008  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009  subtitle_out_max_size, sub);
1010  if (i == 1)
1011  sub->num_rects = save_num_rects;
1012  if (subtitle_out_size < 0) {
1013  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1014  exit_program(1);
1015  }
1016 
1017  av_init_packet(&pkt);
1018  pkt.data = subtitle_out;
1019  pkt.size = subtitle_out_size;
1020  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023  /* XXX: the pts correction is handled here. Maybe handling
1024  it in the codec would be better */
1025  if (i == 0)
1026  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027  else
1028  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029  }
1030  pkt.dts = pkt.pts;
1031  output_packet(of, &pkt, ost, 0);
1032  }
1033 }
1034 
1035 static void do_video_out(OutputFile *of,
1036  OutputStream *ost,
1037  AVFrame *next_picture,
1038  double sync_ipts)
1039 {
1040  int ret, format_video_sync;
1041  AVPacket pkt;
1042  AVCodecContext *enc = ost->enc_ctx;
1043  AVCodecParameters *mux_par = ost->st->codecpar;
1044  AVRational frame_rate;
1045  int nb_frames, nb0_frames, i;
1046  double delta, delta0;
1047  double duration = 0;
1048  int frame_size = 0;
1049  InputStream *ist = NULL;
1051 
1052  if (ost->source_index >= 0)
1053  ist = input_streams[ost->source_index];
1054 
1055  frame_rate = av_buffersink_get_frame_rate(filter);
1056  if (frame_rate.num > 0 && frame_rate.den > 0)
1057  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1058 
1059  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1061 
1062  if (!ost->filters_script &&
1063  !ost->filters &&
1064  next_picture &&
1065  ist &&
1066  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1068  }
1069 
1070  if (!next_picture) {
1071  //end, flushing
1072  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073  ost->last_nb0_frames[1],
1074  ost->last_nb0_frames[2]);
1075  } else {
1076  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077  delta = delta0 + duration;
1078 
1079  /* by default, we output a single frame */
1080  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1081  nb_frames = 1;
1082 
1083  format_video_sync = video_sync_method;
1084  if (format_video_sync == VSYNC_AUTO) {
1085  if(!strcmp(of->ctx->oformat->name, "avi")) {
1086  format_video_sync = VSYNC_VFR;
1087  } else
1088  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1089  if ( ist
1090  && format_video_sync == VSYNC_CFR
1091  && input_files[ist->file_index]->ctx->nb_streams == 1
1092  && input_files[ist->file_index]->input_ts_offset == 0) {
1093  format_video_sync = VSYNC_VSCFR;
1094  }
1095  if (format_video_sync == VSYNC_CFR && copy_ts) {
1096  format_video_sync = VSYNC_VSCFR;
1097  }
1098  }
1099  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1100 
1101  if (delta0 < 0 &&
1102  delta > 0 &&
1103  format_video_sync != VSYNC_PASSTHROUGH &&
1104  format_video_sync != VSYNC_DROP) {
1105  if (delta0 < -0.6) {
1106  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1107  } else
1108  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109  sync_ipts = ost->sync_opts;
1110  duration += delta0;
1111  delta0 = 0;
1112  }
1113 
1114  switch (format_video_sync) {
1115  case VSYNC_VSCFR:
1116  if (ost->frame_number == 0 && delta0 >= 0.5) {
1117  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1118  delta = duration;
1119  delta0 = 0;
1120  ost->sync_opts = lrint(sync_ipts);
1121  }
1122  case VSYNC_CFR:
1123  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1125  nb_frames = 0;
1126  } else if (delta < -1.1)
1127  nb_frames = 0;
1128  else if (delta > 1.1) {
1129  nb_frames = lrintf(delta);
1130  if (delta0 > 1.1)
1131  nb0_frames = lrintf(delta0 - 0.6);
1132  }
1133  break;
1134  case VSYNC_VFR:
1135  if (delta <= -0.6)
1136  nb_frames = 0;
1137  else if (delta > 0.6)
1138  ost->sync_opts = lrint(sync_ipts);
1139  break;
1140  case VSYNC_DROP:
1141  case VSYNC_PASSTHROUGH:
1142  ost->sync_opts = lrint(sync_ipts);
1143  break;
1144  default:
1145  av_assert0(0);
1146  }
1147  }
1148 
1149  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150  nb0_frames = FFMIN(nb0_frames, nb_frames);
1151 
1152  memmove(ost->last_nb0_frames + 1,
1153  ost->last_nb0_frames,
1154  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155  ost->last_nb0_frames[0] = nb0_frames;
1156 
1157  if (nb0_frames == 0 && ost->last_dropped) {
1158  nb_frames_drop++;
1160  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161  ost->frame_number, ost->st->index, ost->last_frame->pts);
1162  }
1163  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164  if (nb_frames > dts_error_threshold * 30) {
1165  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1166  nb_frames_drop++;
1167  return;
1168  }
1169  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171  if (nb_frames_dup > dup_warning) {
1172  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1173  dup_warning *= 10;
1174  }
1175  }
1176  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1177 
1178  /* duplicates frame if needed */
1179  for (i = 0; i < nb_frames; i++) {
1180  AVFrame *in_picture;
1181  av_init_packet(&pkt);
1182  pkt.data = NULL;
1183  pkt.size = 0;
1184 
1185  if (i < nb0_frames && ost->last_frame) {
1186  in_picture = ost->last_frame;
1187  } else
1188  in_picture = next_picture;
1189 
1190  if (!in_picture)
1191  return;
1192 
1193  in_picture->pts = ost->sync_opts;
1194 
1195 #if 1
1196  if (!check_recording_time(ost))
1197 #else
1198  if (ost->frame_number >= ost->max_frames)
1199 #endif
1200  return;
1201 
1202 #if FF_API_LAVF_FMT_RAWPICTURE
1203  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1204  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1205  /* raw pictures are written as AVPicture structure to
1206  avoid any copies. We support temporarily the older
1207  method. */
1208  if (in_picture->interlaced_frame)
1209  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1210  else
1211  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1212  pkt.data = (uint8_t *)in_picture;
1213  pkt.size = sizeof(AVPicture);
1214  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1215  pkt.flags |= AV_PKT_FLAG_KEY;
1216 
1217  output_packet(of, &pkt, ost, 0);
1218  } else
1219 #endif
1220  {
1221  int forced_keyframe = 0;
1222  double pts_time;
1223 
1225  ost->top_field_first >= 0)
1226  in_picture->top_field_first = !!ost->top_field_first;
1227 
1228  if (in_picture->interlaced_frame) {
1229  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231  else
1232  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233  } else
1234  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 
1236  in_picture->quality = enc->global_quality;
1237  in_picture->pict_type = 0;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  in_picture->pts * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1264  }
1265 
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1) {
1270  forced_keyframe = 1;
1271  }
1272 
1273  if (forced_keyframe) {
1274  in_picture->pict_type = AV_PICTURE_TYPE_I;
1275  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276  }
1277 
1279  if (debug_ts) {
1280  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283  enc->time_base.num, enc->time_base.den);
1284  }
1285 
1286  ost->frames_encoded++;
1287 
1288  ret = avcodec_send_frame(enc, in_picture);
1289  if (ret < 0)
1290  goto error;
1291 
1292  while (1) {
1293  ret = avcodec_receive_packet(enc, &pkt);
1294  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295  if (ret == AVERROR(EAGAIN))
1296  break;
1297  if (ret < 0)
1298  goto error;
1299 
1300  if (debug_ts) {
1301  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1305  }
1306 
1307  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308  pkt.pts = ost->sync_opts;
1309 
1310  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1311 
1312  if (debug_ts) {
1313  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1317  }
1318 
1319  frame_size = pkt.size;
1320  output_packet(of, &pkt, ost, 0);
1321 
1322  /* if two pass, output log */
1323  if (ost->logfile && enc->stats_out) {
1324  fprintf(ost->logfile, "%s", enc->stats_out);
1325  }
1326  }
1327  }
1328  ost->sync_opts++;
1329  /*
1330  * For video, number of frames in == number of packets out.
1331  * But there may be reordering, so we can't throw away frames on encoder
1332  * flush, we need to limit them here, before they go into encoder.
1333  */
1334  ost->frame_number++;
1335 
1336  if (vstats_filename && frame_size)
1337  do_video_stats(ost, frame_size);
1338  }
1339 
1340  if (!ost->last_frame)
1341  ost->last_frame = av_frame_alloc();
1342  av_frame_unref(ost->last_frame);
1343  if (next_picture && ost->last_frame)
1344  av_frame_ref(ost->last_frame, next_picture);
1345  else
1346  av_frame_free(&ost->last_frame);
1347 
1348  return;
1349 error:
1350  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1351  exit_program(1);
1352 }
1353 
1354 static double psnr(double d)
1355 {
1356  return -10.0 * log10(d);
1357 }
1358 
/* Append one line of per-frame video statistics (frame number, quantizer,
 * PSNR, sizes, bitrates, picture type) to the -vstats log file.
 * NOTE(review): the function signature line was dropped by the extraction;
 * the calls at do_video_stats(ost, frame_size) above suggest it takes an
 * OutputStream* and an int frame size -- verify against upstream ffmpeg.c. */
1360 {
1361  AVCodecContext *enc;
1362  int frame_number;
1363  double ti1, bitrate, avg_bitrate;
1364 
1365  /* this is executed just the first time do_video_stats is called */
1366  if (!vstats_file) {
1367  vstats_file = fopen(vstats_filename, "w");
1368  if (!vstats_file) {
1369  perror("fopen");
1370  exit_program(1);
1371  }
1372  }
1373 
1374  enc = ost->enc_ctx;
1375  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376  frame_number = ost->st->nb_frames;
/* vstats_version 2 adds the output file and stream indices to each line */
1377  if (vstats_version <= 1) {
1378  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379  ost->quality / (float)FF_QP2LAMBDA);
1380  } else {
1381  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382  ost->quality / (float)FF_QP2LAMBDA);
1383  }
1384 
/* only print PSNR when the encoder was asked to collect error stats */
1385  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387 
1388  fprintf(vstats_file,"f_size= %6d ", frame_size);
1389  /* compute pts value */
1390  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp to avoid dividing by ~0 when computing the average bitrate below */
1391  if (ti1 < 0.01)
1392  ti1 = 0.01;
1393 
1394  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1399  }
1400 }
1401 
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1403 
/* Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature line and one statement (rendered line 1409,
 * presumably the call that flags this stream finished) were dropped by the
 * extraction -- verify against upstream ffmpeg.c. */
1405 {
1406  OutputFile *of = output_files[ost->file_index];
1407  int i;
1408 
1410 
/* -shortest: stop encoding and muxing on all sibling streams of this file */
1411  if (of->shortest) {
1412  for (i = 0; i < of->ctx->nb_streams; i++)
1413  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414  }
1415 }
1416 
1417 /**
1418  * Get and encode new output from any of the filtergraphs, without causing
1419  * activity.
1420  *
1421  * @return 0 for success, <0 for severe errors
1422  */
/* Pull every frame currently available from each output stream's buffersink
 * and hand it to the audio/video encoding path; does not push new data into
 * the graphs. With flush set, EOF from a video sink triggers a final
 * do_video_out(NULL) to drain the encoder. Returns 0 or AVERROR(ENOMEM). */
1423 static int reap_filters(int flush)
1424 {
1425  AVFrame *filtered_frame = NULL;
1426  int i;
1427 
1428  /* Reap all buffers present in the buffer sinks */
1429  for (i = 0; i < nb_output_streams; i++) {
1430  OutputStream *ost = output_streams[i];
1431  OutputFile *of = output_files[ost->file_index];
/* NOTE(review): a declaration line (rendered 1432, presumably
 * "AVFilterContext *filter;") was dropped by the extraction. */
1433  AVCodecContext *enc = ost->enc_ctx;
1434  int ret = 0;
1435 
/* skip streams that have no configured filtergraph yet */
1436  if (!ost->filter || !ost->filter->graph->graph)
1437  continue;
1438  filter = ost->filter->filter;
1439 
/* lazily initialize the output stream the first time its sink has data */
1440  if (!ost->initialized) {
1441  char error[1024] = "";
1442  ret = init_output_stream(ost, error, sizeof(error));
1443  if (ret < 0) {
1444  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445  ost->file_index, ost->index, error);
1446  exit_program(1);
1447  }
1448  }
1449 
/* reuse a per-stream frame to avoid an allocation per reaped frame */
1450  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451  return AVERROR(ENOMEM);
1452  }
1453  filtered_frame = ost->filtered_frame;
1454 
1455  while (1) {
1456  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
/* NOTE(review): the flags argument line (rendered 1458) and two av_log
 * prefix lines (1461, 1464) were dropped by the extraction. */
1459  if (ret < 0) {
1460  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463  } else if (flush && ret == AVERROR_EOF) {
/* graph is fully drained: flush the video encoder with a NULL frame */
1465  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1466  }
1467  break;
1468  }
1469  if (ost->finished) {
1470  av_frame_unref(filtered_frame);
1471  continue;
1472  }
1473  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475  AVRational filter_tb = av_buffersink_get_time_base(filter);
1476  AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra precision */
1477  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478 
1479  tb.den <<= extra_bits;
1480  float_pts =
1481  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483  float_pts /= 1 << extra_bits;
1484  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486 
/* the frame itself gets the pts rescaled into the encoder timebase */
1487  filtered_frame->pts =
1488  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1490  }
1491  //if (ost->source_index >= 0)
1492  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1493 
1494  switch (av_buffersink_get_type(filter)) {
1495  case AVMEDIA_TYPE_VIDEO:
1496  if (!ost->frame_aspect_ratio.num)
1497  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1498 
1499  if (debug_ts) {
1500  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1502  float_pts,
1503  enc->time_base.num, enc->time_base.den);
1504  }
1505 
1506  do_video_out(of, ost, filtered_frame, float_pts);
1507  break;
1508  case AVMEDIA_TYPE_AUDIO:
/* encoder cannot follow mid-stream channel changes: drop the frame */
1509  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510  enc->channels != filtered_frame->channels) {
1512  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1513  break;
1514  }
1515  do_audio_out(of, ost, filtered_frame);
1516  break;
1517  default:
1518  // TODO support subtitle filters
1519  av_assert0(0);
1520  }
1521 
1522  av_frame_unref(filtered_frame);
1523  }
1524  }
1525 
1526  return 0;
1527 }
1528 
/* Print the end-of-run summary: per-type output byte totals with muxing
 * overhead, then verbose per-input and per-output stream packet/frame
 * statistics, and a warning when nothing was encoded at all. */
1529 static void print_final_stats(int64_t total_size)
1530 {
1531  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532  uint64_t subtitle_size = 0;
1533  uint64_t data_size = 0;
1534  float percent = -1.0;
1535  int i, j;
1536  int pass1_used = 1;
1537 
/* accumulate written bytes per media type across all output streams */
1538  for (i = 0; i < nb_output_streams; i++) {
1539  OutputStream *ost = output_streams[i];
1540  switch (ost->enc_ctx->codec_type) {
1541  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544  default: other_size += ost->data_size; break;
1545  }
1546  extra_size += ost->enc_ctx->extradata_size;
1547  data_size += ost->data_size;
/* NOTE(review): the condition guarding pass1_used (rendered lines
 * 1548-1549, presumably a two-pass flags check) was dropped by the
 * extraction -- verify against upstream ffmpeg.c. */
1550  pass1_used = 0;
1551  }
1552 
/* overhead percentage only makes sense when the container size is known */
1553  if (data_size && total_size>0 && total_size >= data_size)
1554  percent = 100.0 * (total_size - data_size) / data_size;
1555 
1556  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557  video_size / 1024.0,
1558  audio_size / 1024.0,
1559  subtitle_size / 1024.0,
1560  other_size / 1024.0,
1561  extra_size / 1024.0);
1562  if (percent >= 0.0)
1563  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1564  else
1565  av_log(NULL, AV_LOG_INFO, "unknown");
1566  av_log(NULL, AV_LOG_INFO, "\n");
1567 
1568  /* print verbose per-stream stats */
1569  for (i = 0; i < nb_input_files; i++) {
1570  InputFile *f = input_files[i];
1571  uint64_t total_packets = 0, total_size = 0;
1572 
1573  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574  i, f->ctx->filename);
1575 
1576  for (j = 0; j < f->nb_streams; j++) {
1577  InputStream *ist = input_streams[f->ist_index + j];
1578  enum AVMediaType type = ist->dec_ctx->codec_type;
1579 
1580  total_size += ist->data_size;
1581  total_packets += ist->nb_packets;
1582 
1583  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584  i, j, media_type_string(type));
1585  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586  ist->nb_packets, ist->data_size);
1587 
1588  if (ist->decoding_needed) {
1589  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590  ist->frames_decoded);
1591  if (type == AVMEDIA_TYPE_AUDIO)
1592  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593  av_log(NULL, AV_LOG_VERBOSE, "; ");
1594  }
1595 
1596  av_log(NULL, AV_LOG_VERBOSE, "\n");
1597  }
1598 
1599  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600  total_packets, total_size);
1601  }
1602 
1603  for (i = 0; i < nb_output_files; i++) {
1604  OutputFile *of = output_files[i];
1605  uint64_t total_packets = 0, total_size = 0;
1606 
1607  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608  i, of->ctx->filename);
1609 
1610  for (j = 0; j < of->ctx->nb_streams; j++) {
1611  OutputStream *ost = output_streams[of->ost_index + j];
1612  enum AVMediaType type = ost->enc_ctx->codec_type;
1613 
1614  total_size += ost->data_size;
1615  total_packets += ost->packets_written;
1616 
1617  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618  i, j, media_type_string(type));
1619  if (ost->encoding_needed) {
1620  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621  ost->frames_encoded);
1622  if (type == AVMEDIA_TYPE_AUDIO)
1623  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624  av_log(NULL, AV_LOG_VERBOSE, "; ");
1625  }
1626 
1627  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628  ost->packets_written, ost->data_size);
1629 
1630  av_log(NULL, AV_LOG_VERBOSE, "\n");
1631  }
1632 
1633  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634  total_packets, total_size);
1635  }
/* empty output: warn, and hint at -ss/-t/-frames unless this was pass 1 */
1636  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1638  if (pass1_used) {
1639  av_log(NULL, AV_LOG_WARNING, "\n");
1640  } else {
1641  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1642  }
1643  }
1644 }
1645 
/* Emit the periodic (every ~500ms) or final status line -- frame count, fps,
 * quantizer, optional QP histogram and PSNR, size, time, bitrate, dup/drop
 * counts and speed -- to stderr/log, and mirror it in machine-readable
 * key=value form to the -progress AVIO stream when one is configured. */
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1647 {
1648  char buf[1024];
1649  AVBPrint buf_script;
1650  OutputStream *ost;
1651  AVFormatContext *oc;
1652  int64_t total_size;
1653  AVCodecContext *enc;
1654  int frame_number, vid, i;
1655  double bitrate;
1656  double speed;
1657  int64_t pts = INT64_MIN + 1;
1658  static int64_t last_time = -1;
1659  static int qp_histogram[52];
1660  int hours, mins, secs, us;
1661  int ret;
1662  float t;
1663 
1664  if (!print_stats && !is_last_report && !progress_avio)
1665  return;
1666 
/* rate-limit intermediate reports to one per 500ms of wall time */
1667  if (!is_last_report) {
1668  if (last_time == -1) {
1669  last_time = cur_time;
1670  return;
1671  }
1672  if ((cur_time - last_time) < 500000)
1673  return;
1674  last_time = cur_time;
1675  }
1676 
1677  t = (cur_time-timer_start) / 1000000.0;
1678 
1679 
1680  oc = output_files[0]->ctx;
1681 
1682  total_size = avio_size(oc->pb);
1683  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684  total_size = avio_tell(oc->pb);
1685 
1686  buf[0] = '\0';
1687  vid = 0;
1688  av_bprint_init(&buf_script, 0, 1);
1689  for (i = 0; i < nb_output_streams; i++) {
1690  float q = -1;
1691  ost = output_streams[i];
1692  enc = ost->enc_ctx;
1693  if (!ost->stream_copy)
1694  q = ost->quality / (float) FF_QP2LAMBDA;
1695 
/* secondary video streams only report their quantizer */
1696  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1698  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699  ost->file_index, ost->index, q);
1700  }
/* first video stream drives frame/fps/qp-histogram/PSNR reporting */
1701  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1702  float fps;
1703 
1704  frame_number = ost->frame_number;
1705  fps = t > 1 ? frame_number / t : 0;
1706  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1707  frame_number, fps < 9.95, fps, q);
1708  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1710  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711  ost->file_index, ost->index, q);
1712  if (is_last_report)
1713  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1714  if (qp_hist) {
1715  int j;
1716  int qp = lrintf(q);
1717  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1718  qp_histogram[qp]++;
1719  for (j = 0; j < 32; j++)
1720  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1721  }
1722 
1723  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1724  int j;
1725  double error, error_sum = 0;
1726  double scale, scale_sum = 0;
1727  double p;
1728  char type[3] = { 'Y','U','V' };
1729  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1730  for (j = 0; j < 3; j++) {
/* last report averages over all frames; periodic uses the last frame only */
1731  if (is_last_report) {
1732  error = enc->error[j];
1733  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1734  } else {
1735  error = ost->error[j];
1736  scale = enc->width * enc->height * 255.0 * 255.0;
1737  }
/* chroma planes are subsampled to a quarter of the luma area */
1738  if (j)
1739  scale /= 4;
1740  error_sum += error;
1741  scale_sum += scale;
1742  p = psnr(error / scale);
1743  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1744  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745  ost->file_index, ost->index, type[j] | 32, p);
1746  }
1747  p = psnr(error_sum / scale_sum);
1748  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1749  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750  ost->file_index, ost->index, p);
1751  }
1752  vid = 1;
1753  }
1754  /* compute min output value */
/* NOTE(review): a guard line (rendered 1755, presumably a check that the
 * stream end pts is valid) was dropped by the extraction. */
1756  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757  ost->st->time_base, AV_TIME_BASE_Q));
1758  if (is_last_report)
1759  nb_frames_drop += ost->last_dropped;
1760  }
1761 
1762  secs = FFABS(pts) / AV_TIME_BASE;
1763  us = FFABS(pts) % AV_TIME_BASE;
1764  mins = secs / 60;
1765  secs %= 60;
1766  hours = mins / 60;
1767  mins %= 60;
1768 
1769  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 
1772  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773  "size=N/A time=");
1774  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775  "size=%8.0fkB time=", total_size / 1024.0);
1776  if (pts < 0)
1777  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1778  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1779  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1780  (100 * us) / AV_TIME_BASE);
1781 
1782  if (bitrate < 0) {
1783  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1784  av_bprintf(&buf_script, "bitrate=N/A\n");
1785  }else{
1786  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1787  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1788  }
1789 
1790  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1793  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1794  hours, mins, secs, us);
1795 
/* NOTE(review): the condition and argument lines around this snprintf
 * (rendered 1796 and 1798, presumably the dup/drop counters) were dropped
 * by the extraction -- verify against upstream ffmpeg.c. */
1797  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1799  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1800  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1801 
1802  if (speed < 0) {
1803  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1804  av_bprintf(&buf_script, "speed=N/A\n");
1805  } else {
1806  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1807  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1808  }
1809 
/* '\r' keeps periodic reports on one terminal line; '\n' ends the last one */
1810  if (print_stats || is_last_report) {
1811  const char end = is_last_report ? '\n' : '\r';
1812  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1813  fprintf(stderr, "%s %c", buf, end);
1814  } else
1815  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1816 
1817  fflush(stderr);
1818  }
1819 
1820  if (progress_avio) {
1821  av_bprintf(&buf_script, "progress=%s\n",
1822  is_last_report ? "end" : "continue");
1823  avio_write(progress_avio, buf_script.str,
1824  FFMIN(buf_script.len, buf_script.size - 1));
1825  avio_flush(progress_avio);
1826  av_bprint_finalize(&buf_script, NULL);
1827  if (is_last_report) {
1828  if ((ret = avio_closep(&progress_avio)) < 0)
1830  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1831  }
1832  }
1833 
1834  if (is_last_report)
1835  print_final_stats(total_size);
1836 }
1837 
/* Drain every encoder at end of stream: lazily initialize streams that never
 * received data (configuring their filtergraphs with fake input parameters),
 * then send a NULL frame to each encoder and mux all remaining packets until
 * AVERROR_EOF. Exits the program on encoding failures. */
1838 static void flush_encoders(void)
1839 {
1840  int i, ret;
1841 
1842  for (i = 0; i < nb_output_streams; i++) {
1843  OutputStream *ost = output_streams[i];
1844  AVCodecContext *enc = ost->enc_ctx;
1845  OutputFile *of = output_files[ost->file_index];
1846 
1847  if (!ost->encoding_needed)
1848  continue;
1849 
1850  // Try to enable encoding with no input frames.
1851  // Maybe we should just let encoding fail instead.
1852  if (!ost->initialized) {
1853  FilterGraph *fg = ost->filter->graph;
1854  char error[1024] = "";
1855 
/* NOTE(review): the av_log prefix line (rendered 1856) was dropped by the
 * extraction. */
1857  "Finishing stream %d:%d without any data written to it.\n",
1858  ost->file_index, ost->st->index);
1859 
1860  if (ost->filter && !fg->graph) {
1861  int x;
/* backfill filter input parameters from codecpar so the graph can be
 * configured even though no frame was ever decoded */
1862  for (x = 0; x < fg->nb_inputs; x++) {
1863  InputFilter *ifilter = fg->inputs[x];
1864  if (ifilter->format < 0) {
1865  AVCodecParameters *par = ifilter->ist->st->codecpar;
1866  // We never got any input. Set a fake format, which will
1867  // come from libavformat.
1868  ifilter->format = par->format;
1869  ifilter->sample_rate = par->sample_rate;
1870  ifilter->channels = par->channels;
1871  ifilter->channel_layout = par->channel_layout;
1872  ifilter->width = par->width;
1873  ifilter->height = par->height;
1874  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1875  }
1876  }
1877 
/* NOTE(review): a condition line (rendered 1878) was dropped by the
 * extraction; as shown, this "continue" would make the code below it
 * unreachable -- verify against upstream ffmpeg.c. */
1879  continue;
1880 
1881  ret = configure_filtergraph(fg);
1882  if (ret < 0) {
1883  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1884  exit_program(1);
1885  }
1886 
1887  finish_output_stream(ost);
1888  }
1889 
1890  ret = init_output_stream(ost, error, sizeof(error));
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893  ost->file_index, ost->index, error);
1894  exit_program(1);
1895  }
1896  }
1897 
/* raw PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1898  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1899  continue;
1900 #if FF_API_LAVF_FMT_RAWPICTURE
1901  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1902  continue;
1903 #endif
1904 
/* NOTE(review): the condition line (rendered 1905) guarding this
 * "continue" was dropped by the extraction. */
1906  continue;
1907 
1908  for (;;) {
1909  const char *desc = NULL;
1910  AVPacket pkt;
1911  int pkt_size;
1912 
1913  switch (enc->codec_type) {
1914  case AVMEDIA_TYPE_AUDIO:
1915  desc = "audio";
1916  break;
1917  case AVMEDIA_TYPE_VIDEO:
1918  desc = "video";
1919  break;
1920  default:
1921  av_assert0(0);
1922  }
1923 
1924  av_init_packet(&pkt);
1925  pkt.data = NULL;
1926  pkt.size = 0;
1927 
/* NOTE(review): a line (rendered 1928) was dropped by the extraction here. */
1929 
/* EAGAIN from receive means the encoder wants more input: feed it the
 * flush (NULL) frame, then retry draining */
1930  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1931  ret = avcodec_send_frame(enc, NULL);
1932  if (ret < 0) {
1933  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1934  desc,
1935  av_err2str(ret));
1936  exit_program(1);
1937  }
1938  }
1939 
1940  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1941  if (ret < 0 && ret != AVERROR_EOF) {
1942  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1943  desc,
1944  av_err2str(ret));
1945  exit_program(1);
1946  }
/* two-pass: append this encoder's stats to its pass log */
1947  if (ost->logfile && enc->stats_out) {
1948  fprintf(ost->logfile, "%s", enc->stats_out);
1949  }
/* EOF: encoder fully drained -- send the EOF marker to the muxer */
1950  if (ret == AVERROR_EOF) {
1951  output_packet(of, &pkt, ost, 1);
1952  break;
1953  }
1954  if (ost->finished & MUXER_FINISHED) {
1955  av_packet_unref(&pkt);
1956  continue;
1957  }
1958  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959  pkt_size = pkt.size;
1960  output_packet(of, &pkt, ost, 0);
/* NOTE(review): the condition line (rendered 1961, presumably a vstats
 * check for video streams) was dropped by the extraction. */
1962  do_video_stats(ost, pkt_size);
1963  }
1964  }
1965  }
1966 }
1967 
1968 /*
1969  * Check whether a packet from ist should be written into ost at this time
1970  */
/* Decide whether a packet from input stream ist may be written to ost now:
 * the stream must be ost's source, ost must not be finished, and the input
 * pts must have reached the output file's start time. Returns 1 to write.
 * NOTE(review): the signature line (rendered 1971) was dropped by the
 * extraction; the comment above names it check_output_constraints. */
1972 {
1973  OutputFile *of = output_files[ost->file_index];
1974  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1975 
1976  if (ost->source_index != ist_index)
1977  return 0;
1978 
1979  if (ost->finished)
1980  return 0;
1981 
/* honor -ss on the output: drop packets before the requested start */
1982  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1983  return 0;
1984 
1985  return 1;
1986 }
1987 
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start-time/recording-time trimming, rescales timestamps into the
 * muxer timebase, optionally runs the parser to fix up the bitstream, and
 * hands the result to output_packet(). */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1989 {
1990  OutputFile *of = output_files[ost->file_index];
1991  InputFile *f = input_files [ist->file_index];
1992  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1994  AVPicture pict;
1995  AVPacket opkt;
1996 
1997  av_init_packet(&opkt);
1998 
/* NOTE(review): the second half of this condition (rendered line 2000) was
 * dropped by the extraction -- verify against upstream ffmpeg.c. */
1999  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2001  return;
2002 
/* before the first written frame, drop packets that precede the requested
 * start point unless -copypriorss asked to keep them */
2003  if (!ost->frame_number && !ost->copy_prior_start) {
2004  int64_t comp_start = start_time;
2005  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007  if (pkt->pts == AV_NOPTS_VALUE ?
2008  ist->pts < comp_start :
2009  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2010  return;
2011  }
2012 
/* -t on the output file: close the stream once its duration is reached */
2013  if (of->recording_time != INT64_MAX &&
2014  ist->pts >= of->recording_time + start_time) {
2015  close_output_stream(ost);
2016  return;
2017  }
2018 
/* -t on the input file, measured from the input's own start time */
2019  if (f->recording_time != INT64_MAX) {
2020  start_time = f->ctx->start_time;
2021  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022  start_time += f->start_time;
2023  if (ist->pts >= f->recording_time + start_time) {
2024  close_output_stream(ost);
2025  return;
2026  }
2027  }
2028 
2029  /* force the input stream PTS */
2030  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2031  ost->sync_opts++;
2032 
2033  if (pkt->pts != AV_NOPTS_VALUE)
2034  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2035  else
2036  opkt.pts = AV_NOPTS_VALUE;
2037 
/* missing dts: fall back to the demuxer-estimated input dts */
2038  if (pkt->dts == AV_NOPTS_VALUE)
2039  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2040  else
2041  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042  opkt.dts -= ost_tb_start_time;
2043 
/* audio: rescale via av_rescale_delta to keep sample-accurate timestamps
 * NOTE(review): lines rendered 2045 and 2049 (the duration computation and
 * one av_rescale_delta argument line) were dropped by the extraction. */
2044  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2046  if(!duration)
2047  duration = ist->dec_ctx->frame_size;
2048  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2050  ost->mux_timebase) - ost_tb_start_time;
2051  }
2052 
2053  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2054 
2055  opkt.flags = pkt->flags;
2056  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2061  ) {
/* NOTE(review): the final argument line of av_parser_change (rendered
 * 2065) was dropped by the extraction. */
2062  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063  &opkt.data, &opkt.size,
2064  pkt->data, pkt->size,
2066  if (ret < 0) {
2067  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2068  av_err2str(ret));
2069  exit_program(1);
2070  }
/* nonzero return: parser allocated a new buffer -- wrap it so the packet
 * owns (and later frees) it */
2071  if (ret) {
2072  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2073  if (!opkt.buf)
2074  exit_program(1);
2075  }
2076  } else {
2077  opkt.data = pkt->data;
2078  opkt.size = pkt->size;
2079  }
2080  av_copy_packet_side_data(&opkt, pkt);
2081 
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086  /* store AVPicture in AVPacket, as expected by the output format */
2087  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2088  if (ret < 0) {
2089  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2090  av_err2str(ret));
2091  exit_program(1);
2092  }
2093  opkt.data = (uint8_t *)&pict;
2094  opkt.size = sizeof(AVPicture);
2095  opkt.flags |= AV_PKT_FLAG_KEY;
2096  }
2097 #endif
2098 
2099  output_packet(of, &opkt, ost, 0);
2100 }
2101 
/* If the decoder reported no channel layout, guess one from the channel
 * count (up to ist->guess_layout_max channels) and warn about the guess.
 * Returns 1 when a usable layout is present or was guessed, 0 otherwise.
 * NOTE(review): the signature line (rendered 2102) and the line assigning
 * the guessed layout (rendered 2111, presumably via
 * av_get_default_channel_layout) were dropped by the extraction. */
2103 {
2104  AVCodecContext *dec = ist->dec_ctx;
2105 
2106  if (!dec->channel_layout) {
2107  char layout_name[256];
2108 
/* too many channels to guess safely: give up */
2109  if (dec->channels > ist->guess_layout_max)
2110  return 0;
2112  if (!dec->channel_layout)
2113  return 0;
2114  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115  dec->channels, dec->channel_layout);
2116  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2118  }
2119  return 1;
2120 }
2121 
/* Track decode success/failure statistics and, with -xerror, abort the
 * program on decode errors or corrupt decoded frames. */
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2123 {
/* decode_error_stat[0] counts successes, [1] counts failures */
2124  if (*got_output || ret<0)
2125  decode_error_stat[ret<0] ++;
2126 
2127  if (ret < 0 && exit_on_error)
2128  exit_program(1);
2129 
/* NOTE(review): the inner condition testing the decoded frame for
 * corruption (rendered line 2131) was dropped by the extraction, which is
 * why the brace nesting below looks one level deep -- verify against
 * upstream ffmpeg.c. */
2130  if (exit_on_error && *got_output && ist) {
2132  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2133  exit_program(1);
2134  }
2135  }
2136 }
2137 
2138 // Filters can be configured only if the formats of all inputs are known.
/* Body of ifilter_has_all_input_formats() — the signature line (orig. 2139)
 * was dropped by the extraction. Returns 1 when every audio/video input of
 * the filtergraph has a negotiated format (>= 0), 0 otherwise. */
2140 {
2141  int i;
2142  for (i = 0; i < fg->nb_inputs; i++) {
 /* A negative format means "not yet known" for audio/video inputs. */
2143  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2145  return 0;
2146  }
2147  return 1;
2148 }
2149 
/* Body of ifilter_send_frame() — the signature line (orig. 2150) was dropped
 * by the extraction. Feeds one decoded frame into a filtergraph input,
 * (re)configuring the graph when the frame's parameters changed, or queueing
 * the frame when not all inputs have known formats yet.
 * Returns 0 on success, a negative AVERROR code on failure. */
2151 {
2152  FilterGraph *fg = ifilter->graph;
2153  int need_reinit, ret, i;
2154 
2155  /* determine if the parameters for this input changed */
2156  need_reinit = ifilter->format != frame->format;
 /* Hardware frames context appearing/disappearing or changing also forces a
  * reinit. */
2157  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2159  need_reinit = 1;
2160 
2161  switch (ifilter->ist->st->codecpar->codec_type) {
2162  case AVMEDIA_TYPE_AUDIO:
2163  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164  ifilter->channels != frame->channels ||
2165  ifilter->channel_layout != frame->channel_layout;
2166  break;
2167  case AVMEDIA_TYPE_VIDEO:
2168  need_reinit |= ifilter->width != frame->width ||
2169  ifilter->height != frame->height;
2170  break;
2171  }
2172 
2173  if (need_reinit) {
 /* Record the new input parameters on the InputFilter. */
2174  ret = ifilter_parameters_from_frame(ifilter, frame);
2175  if (ret < 0)
2176  return ret;
2177  }
2178 
2179  /* (re)init the graph if possible, otherwise buffer the frame and return */
2180  if (need_reinit || !fg->graph) {
2181  for (i = 0; i < fg->nb_inputs; i++) {
2182  if (!ifilter_has_all_input_formats(fg)) {
 /* Cannot configure yet: clone the frame and park it in this input's
  * FIFO until all inputs have known formats. */
2183  AVFrame *tmp = av_frame_clone(frame);
2184  if (!tmp)
2185  return AVERROR(ENOMEM);
2186  av_frame_unref(frame);
2187 
 /* Grow the FIFO when full (doubling strategy). */
2188  if (!av_fifo_space(ifilter->frame_queue)) {
2189  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2190  if (ret < 0) {
2191  av_frame_free(&tmp);
2192  return ret;
2193  }
2194  }
2195  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2196  return 0;
2197  }
2198  }
2199 
 /* Drain any frames still buffered in the old graph before tearing it down. */
2200  ret = reap_filters(1);
2201  if (ret < 0 && ret != AVERROR_EOF) {
2202  char errbuf[128];
2203  av_strerror(ret, errbuf, sizeof(errbuf));
2204 
2205  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2206  return ret;
2207  }
2208 
2209  ret = configure_filtergraph(fg);
2210  if (ret < 0) {
2211  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2212  return ret;
2213  }
2214  }
2215 
 /* NOTE(review): orig. line 2216 is missing here — presumably the call that
  * actually pushes the frame into the buffersrc and assigns ret; confirm
  * against upstream. */
2217  if (ret < 0) {
2218  if (ret != AVERROR_EOF)
2219  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2220  return ret;
2221  }
2222 
2223  return 0;
2224 }
2225 
/* Signal EOF on one filtergraph input. If the graph was configured, EOF is
 * propagated into it; if it was never configured and all inputs are now at
 * EOF, the graph's output streams are marked finished instead.
 * Returns 0 on success, a negative AVERROR code on failure. */
2226 static int ifilter_send_eof(InputFilter *ifilter)
2227 {
2228  int i, j, ret;
2229 
2230  ifilter->eof = 1;
2231 
2232  if (ifilter->filter) {
 /* NOTE(review): orig. line 2233 is missing here — presumably the call that
  * sends a NULL (EOF) frame to the buffersrc and assigns ret; confirm
  * against upstream. */
2234  if (ret < 0)
2235  return ret;
2236  } else {
2237  // the filtergraph was never configured
2238  FilterGraph *fg = ifilter->graph;
 /* Scan for any input that has not reached EOF yet. */
2239  for (i = 0; i < fg->nb_inputs; i++)
2240  if (!fg->inputs[i]->eof)
2241  break;
2242  if (i == fg->nb_inputs) {
2243  // All the input streams have finished without the filtergraph
2244  // ever being configured.
2245  // Mark the output streams as finished.
2246  for (j = 0; j < fg->nb_outputs; j++)
2247  finish_output_stream(fg->outputs[j]->ost);
2248  }
2249  }
2250 
2251  return 0;
2252 }
2253 
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2259 {
2260  int ret;
2261 
2262  *got_frame = 0;
2263 
2264  if (pkt) {
2265  ret = avcodec_send_packet(avctx, pkt);
2266  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267  // decoded frames with avcodec_receive_frame() until done.
2268  if (ret < 0 && ret != AVERROR_EOF)
2269  return ret;
2270  }
2271 
2272  ret = avcodec_receive_frame(avctx, frame);
2273  if (ret < 0 && ret != AVERROR(EAGAIN))
2274  return ret;
2275  if (ret >= 0)
2276  *got_frame = 1;
2277 
2278  return 0;
2279 }
2280 
/* Distribute one decoded frame to every filtergraph input fed by this input
 * stream. All but the last filter receive a reference; the last one consumes
 * the frame itself. Returns 0 on success, a negative AVERROR code otherwise. */
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2282 {
2283  int i, ret;
2284  AVFrame *f;
2285 
2286  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287  for (i = 0; i < ist->nb_filters; i++) {
2288  if (i < ist->nb_filters - 1) {
 /* Not the last consumer: hand out a new reference, keep the original. */
2289  f = ist->filter_frame;
2290  ret = av_frame_ref(f, decoded_frame);
2291  if (ret < 0)
2292  break;
2293  } else
2294  f = decoded_frame;
2295  ret = ifilter_send_frame(ist->filters[i], f);
 /* EOF from a filter input is not fatal for the other inputs. */
2296  if (ret == AVERROR_EOF)
2297  ret = 0; /* ignore */
2298  if (ret < 0) {
 /* NOTE(review): orig. line 2299 is missing here — presumably the av_log()
  * call opening that continues into the format string below. */
2300  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2301  break;
2302  }
2303  }
2304  return ret;
2305 }
2306 
/* Decode one audio packet (or drain output when pkt is NULL/repeating),
 * derive a pts for the decoded frame and forward it to the filters.
 * *got_output is set when a frame was produced; *decode_failed distinguishes
 * decoder errors from downstream errors. Returns the decode/filter status. */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2308  int *decode_failed)
2309 {
2310  AVFrame *decoded_frame;
2311  AVCodecContext *avctx = ist->dec_ctx;
2312  int ret, err = 0;
2313  AVRational decoded_frame_tb;
2314 
 /* Lazily allocate the per-stream scratch frames. */
2315  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316  return AVERROR(ENOMEM);
2317  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318  return AVERROR(ENOMEM);
2319  decoded_frame = ist->decoded_frame;
2320 
 /* NOTE(review): orig. line 2321 is missing here — presumably the
  * update_benchmark(NULL) call that starts the timing window. */
2322  ret = decode(avctx, decoded_frame, got_output, pkt);
2323  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2324  if (ret < 0)
2325  *decode_failed = 1;
2326 
 /* A non-positive sample rate would poison all timestamp math below. */
2327  if (ret >= 0 && avctx->sample_rate <= 0) {
2328  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329  ret = AVERROR_INVALIDDATA;
2330  }
2331 
2332  if (ret != AVERROR_EOF)
2333  check_decode_result(ist, got_output, ret);
2334 
2335  if (!*got_output || ret < 0)
2336  return ret;
2337 
2338  ist->samples_decoded += decoded_frame->nb_samples;
2339  ist->frames_decoded++;
2340 
2341 #if 1
2342  /* increment next_dts to use for the case where the input stream does not
2343  have timestamps or there are multiple frames in the packet */
2344  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2345  avctx->sample_rate;
2346  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2347  avctx->sample_rate;
2348 #endif
2349 
 /* Pick a pts source: frame pts, else packet pts, else the stream dts
  * (the latter is in AV_TIME_BASE units). */
2350  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351  decoded_frame_tb = ist->st->time_base;
2352  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353  decoded_frame->pts = pkt->pts;
2354  decoded_frame_tb = ist->st->time_base;
2355  }else {
2356  decoded_frame->pts = ist->dts;
2357  decoded_frame_tb = AV_TIME_BASE_Q;
2358  }
 /* Rescale to a 1/sample_rate timebase, smoothing rounding drift. */
2359  if (decoded_frame->pts != AV_NOPTS_VALUE)
2360  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362  (AVRational){1, avctx->sample_rate});
2363  ist->nb_samples = decoded_frame->nb_samples;
2364  err = send_frame_to_filters(ist, decoded_frame);
2365 
2366  av_frame_unref(ist->filter_frame);
2367  av_frame_unref(decoded_frame);
2368  return err < 0 ? err : ret;
2369 }
2370 
/* Decode one video packet (or drain on EOF), fix up timestamps via the
 * best-effort timestamp / buffered dts values, optionally download hwaccel
 * frames, and forward the result to the filters. *got_output is set when a
 * frame was produced; *decode_failed distinguishes decoder errors from
 * downstream errors. Returns the decode/filter status. */
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2372  int *decode_failed)
2373 {
2374  AVFrame *decoded_frame;
2375  int i, ret = 0, err = 0;
2376  int64_t best_effort_timestamp;
2377  int64_t dts = AV_NOPTS_VALUE;
2378  AVPacket avpkt;
2379 
2380  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2382  // skip the packet.
2383  if (!eof && pkt && pkt->size == 0)
2384  return 0;
2385 
 /* Lazily allocate the per-stream scratch frames. */
2386  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387  return AVERROR(ENOMEM);
2388  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389  return AVERROR(ENOMEM);
2390  decoded_frame = ist->decoded_frame;
2391  if (ist->dts != AV_NOPTS_VALUE)
2392  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2393  if (pkt) {
2394  avpkt = *pkt;
2395  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2396  }
2397 
2398  // The old code used to set dts on the drain packet, which does not work
2399  // with the new API anymore.
2400  if (eof) {
 /* Buffer the dts so drained frames can still get a timestamp below. */
2401  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2402  if (!new)
2403  return AVERROR(ENOMEM);
2404  ist->dts_buffer = new;
2405  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2406  }
2407 
 /* NOTE(review): orig. line 2408 is missing here — presumably the
  * update_benchmark(NULL) call that starts the timing window. */
2409  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2411  if (ret < 0)
2412  *decode_failed = 1;
2413 
2414  // The following line may be required in some cases where there is no parser
2415  // or the parser does not has_b_frames correctly
2416  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2419  } else
 /* NOTE(review): orig. line 2420 is missing here — presumably the av_log()
  * call opening continued by the strings below. */
2421  "video_delay is larger in decoder than demuxer %d > %d.\n"
2422  "If you want to help, upload a sample "
2423  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425  ist->dec_ctx->has_b_frames,
2426  ist->st->codecpar->video_delay);
2427  }
2428 
2429  if (ret != AVERROR_EOF)
2430  check_decode_result(ist, got_output, ret);
2431 
 /* Diagnostic only: the decoder context and emitted frame disagreeing on
  * geometry/format is logged but not treated as an error. */
2432  if (*got_output && ret >= 0) {
2433  if (ist->dec_ctx->width != decoded_frame->width ||
2434  ist->dec_ctx->height != decoded_frame->height ||
2435  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437  decoded_frame->width,
2438  decoded_frame->height,
2439  decoded_frame->format,
2440  ist->dec_ctx->width,
2441  ist->dec_ctx->height,
2442  ist->dec_ctx->pix_fmt);
2443  }
2444  }
2445 
2446  if (!*got_output || ret < 0)
2447  return ret;
2448 
2449  if(ist->top_field_first>=0)
2450  decoded_frame->top_field_first = ist->top_field_first;
2451 
2452  ist->frames_decoded++;
2453 
 /* Download the frame from hardware memory when a hwaccel is active. */
2454  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2456  if (err < 0)
2457  goto fail;
2458  }
2459  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2460 
2461  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2462 
 /* With a forced input framerate, synthesize CFR timestamps instead. */
2463  if (ist->framerate.num)
2464  best_effort_timestamp = ist->cfr_next_pts++;
2465 
 /* While draining, fall back to the dts values buffered above. */
2466  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2467  best_effort_timestamp = ist->dts_buffer[0];
2468 
2469  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2470  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2471  ist->nb_dts_buffer--;
2472  }
2473 
2474  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2475  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2476 
2477  if (ts != AV_NOPTS_VALUE)
2478  ist->next_pts = ist->pts = ts;
2479  }
2480 
2481  if (debug_ts) {
2482  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2483  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2484  ist->st->index, av_ts2str(decoded_frame->pts),
2485  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2486  best_effort_timestamp,
2487  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2488  decoded_frame->key_frame, decoded_frame->pict_type,
2489  ist->st->time_base.num, ist->st->time_base.den);
2490  }
2491 
2492  if (ist->st->sample_aspect_ratio.num)
2493  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2494 
2495  err = send_frame_to_filters(ist, decoded_frame);
2496 
2497 fail:
 /* NOTE(review): orig. line 2498 is missing here — presumably the
  * av_frame_unref(ist->filter_frame) paired with the unref below. */
2499  av_frame_unref(decoded_frame);
2500  return err < 0 ? err : ret;
2501 }
2502 
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * duration (-fix_sub_duration), feed sub2video when used as a filter input,
 * and pass the subtitle to every matching subtitle encoder. */
2503 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2504  int *decode_failed)
2505 {
2506  AVSubtitle subtitle;
2507  int free_sub = 1;
2508  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2509  &subtitle, got_output, pkt);
2510 
2511  check_decode_result(NULL, got_output, ret);
2512 
2513  if (ret < 0 || !*got_output) {
2514  *decode_failed = 1;
 /* An empty packet marks EOF for subtitles: flush sub2video state. */
2515  if (!pkt->size)
2516  sub2video_flush(ist);
2517  return ret;
2518  }
2519 
2520  if (ist->fix_sub_duration) {
2521  int end = 1;
2522  if (ist->prev_sub.got_output) {
 /* Gap to the previous subtitle, in milliseconds. */
2523  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2524  1000, AV_TIME_BASE);
2525  if (end < ist->prev_sub.subtitle.end_display_time) {
2526  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2527  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): orig. line 2528 is missing here — presumably the
  * remaining av_log arguments (old end_display_time, end). */
2529  end <= 0 ? ", dropping it" : "");
 /* NOTE(review): orig. line 2530 is missing here — presumably the
  * assignment clamping the previous subtitle's end_display_time. */
2531  }
2532  }
 /* Operate one subtitle behind: swap current state with the stored one. */
2533  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2534  FFSWAP(int, ret, ist->prev_sub.ret);
2535  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2536  if (end <= 0)
2537  goto out;
2538  }
2539 
2540  if (!*got_output)
2541  return ret;
2542 
2543  if (ist->sub2video.frame) {
2544  sub2video_update(ist, &subtitle);
2545  } else if (ist->nb_filters) {
 /* No sub2video frame yet: queue the subtitle until filters can take it. */
2546  if (!ist->sub2video.sub_queue)
2547  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2548  if (!ist->sub2video.sub_queue)
2549  exit_program(1);
2550  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): orig. line 2551 is missing here — presumably the
  * av_fifo_realloc2() call growing the queue and assigning ret. */
2552  if (ret < 0)
2553  exit_program(1);
2554  }
2555  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
 /* Ownership moved into the queue — do not free below. */
2556  free_sub = 0;
2557  }
2558 
2559  if (!subtitle.num_rects)
2560  goto out;
2561 
2562  ist->frames_decoded++;
2563 
2564  for (i = 0; i < nb_output_streams; i++) {
2565  OutputStream *ost = output_streams[i];
2566 
2567  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2568  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2569  continue;
2570 
2571  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2572  }
2573 
2574 out:
2575  if (free_sub)
2576  avsubtitle_free(&subtitle);
2577  return ret;
2578 }
2579 
/* Body of send_filter_eof() — the signature line (orig. 2580) was dropped by
 * the extraction. Sends EOF to every filtergraph input fed by this input
 * stream; returns 0 on success or the first failing AVERROR code. */
2581 {
2582  int i, ret;
2583  for (i = 0; i < ist->nb_filters; i++) {
2584  ret = ifilter_send_eof(ist->filters[i]);
2585  if (ret < 0)
2586  return ret;
2587  }
2588  return 0;
2589 }
2590 
2591 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one demuxed packet for an input stream: maintain the stream's
 * pts/dts bookkeeping, run the decode loop (audio/video/subtitle) when
 * decoding is needed, propagate filter EOF, and perform stream-copy to all
 * matching outputs. pkt == NULL means EOF (flush); no_eof suppresses the
 * filter EOF (used when looping input). Returns 0 once EOF was fully
 * processed, non-zero while more data is expected. */
2592 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2593 {
2594  int ret = 0, i;
2595  int repeating = 0;
2596  int eof_reached = 0;
2597 
2598  AVPacket avpkt;
2599  if (!ist->saw_first_ts) {
 /* Initial dts compensates for decoder delay (B-frames) when a frame rate
  * is known. */
2600  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2601  ist->pts = 0;
2602  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2603  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2604  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2605  }
2606  ist->saw_first_ts = 1;
2607  }
2608 
2609  if (ist->next_dts == AV_NOPTS_VALUE)
2610  ist->next_dts = ist->dts;
2611  if (ist->next_pts == AV_NOPTS_VALUE)
2612  ist->next_pts = ist->pts;
2613 
2614  if (!pkt) {
2615  /* EOF handling */
2616  av_init_packet(&avpkt);
2617  avpkt.data = NULL;
2618  avpkt.size = 0;
2619  } else {
2620  avpkt = *pkt;
2621  }
2622 
2623  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2624  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2625  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2626  ist->next_pts = ist->pts = ist->dts;
2627  }
2628 
2629  // while we have more to decode or while the decoder did output something on EOF
2630  while (ist->decoding_needed) {
2631  int64_t duration = 0;
2632  int got_output = 0;
2633  int decode_failed = 0;
2634 
2635  ist->pts = ist->next_pts;
2636  ist->dts = ist->next_dts;
2637 
2638  switch (ist->dec_ctx->codec_type) {
2639  case AVMEDIA_TYPE_AUDIO:
2640  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2641  &decode_failed);
2642  break;
2643  case AVMEDIA_TYPE_VIDEO:
2644  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2645  &decode_failed);
2646  if (!repeating || !pkt || got_output) {
2647  if (pkt && pkt->duration) {
2648  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): orig. line 2650 is missing here — presumably the
  * 'ticks' computation from the parser's repeat_pict. */
2651  duration = ((int64_t)AV_TIME_BASE *
2652  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): orig. line 2653 is missing here — presumably the
  * divisor (framerate.num * ticks_per_frame) ending this expression. */
2654  }
2655 
2656  if(ist->dts != AV_NOPTS_VALUE && duration) {
2657  ist->next_dts += duration;
2658  }else
2659  ist->next_dts = AV_NOPTS_VALUE;
2660  }
2661 
2662  if (got_output)
2663  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2664  break;
2665  case AVMEDIA_TYPE_SUBTITLE:
2666  if (repeating)
2667  break;
2668  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2669  if (!pkt && ret >= 0)
2670  ret = AVERROR_EOF;
2671  break;
2672  default:
2673  return -1;
2674  }
2675 
2676  if (ret == AVERROR_EOF) {
2677  eof_reached = 1;
2678  break;
2679  }
2680 
2681  if (ret < 0) {
2682  if (decode_failed) {
2683  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2684  ist->file_index, ist->st->index, av_err2str(ret));
2685  } else {
2686  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2687  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2688  }
2689  if (!decode_failed || exit_on_error)
2690  exit_program(1);
2691  break;
2692  }
2693 
2694  if (got_output)
2695  ist->got_output = 1;
2696 
2697  if (!got_output)
2698  break;
2699 
2700  // During draining, we might get multiple output frames in this loop.
2701  // ffmpeg.c does not drain the filter chain on configuration changes,
2702  // which means if we send multiple frames at once to the filters, and
2703  // one of those frames changes configuration, the buffered frames will
2704  // be lost. This can upset certain FATE tests.
2705  // Decode only 1 frame per call on EOF to appease these FATE tests.
2706  // The ideal solution would be to rewrite decoding to use the new
2707  // decoding API in a better way.
2708  if (!pkt)
2709  break;
2710 
2711  repeating = 1;
2712  }
2713 
2714  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2715  /* except when looping we need to flush but not to send an EOF */
2716  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2717  int ret = send_filter_eof(ist);
2718  if (ret < 0) {
2719  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2720  exit_program(1);
2721  }
2722  }
2723 
2724  /* handle stream copy */
2725  if (!ist->decoding_needed) {
2726  ist->dts = ist->next_dts;
2727  switch (ist->dec_ctx->codec_type) {
2728  case AVMEDIA_TYPE_AUDIO:
2729  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2730  ist->dec_ctx->sample_rate;
2731  break;
2732  case AVMEDIA_TYPE_VIDEO:
2733  if (ist->framerate.num) {
2734  // TODO: Remove work-around for c99-to-c89 issue 7
2735  AVRational time_base_q = AV_TIME_BASE_Q;
2736  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2737  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2738  } else if (pkt->duration) {
2739  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740  } else if(ist->dec_ctx->framerate.num != 0) {
2741  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2742  ist->next_dts += ((int64_t)AV_TIME_BASE *
2743  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): orig. line 2744 is missing here — presumably the
  * divisor (framerate.num * ticks_per_frame) ending this expression. */
2745  }
2746  break;
2747  }
2748  ist->pts = ist->dts;
2749  ist->next_pts = ist->next_dts;
2750  }
2751  for (i = 0; pkt && i < nb_output_streams; i++) {
2752  OutputStream *ost = output_streams[i];
2753 
2754  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2755  continue;
2756 
2757  do_streamcopy(ist, ost, pkt);
2758  }
2759 
2760  return !eof_reached;
2761 }
2762 
/* Generate the SDP for all RTP output files and print it to stdout (or write
 * it to -sdp_file). Bails out silently until every output header is written. */
2763 static void print_sdp(void)
2764 {
2765  char sdp[16384];
2766  int i;
2767  int j;
2768  AVIOContext *sdp_pb;
2769  AVFormatContext **avc;
2770 
 /* Wait until every muxer has written its header. */
2771  for (i = 0; i < nb_output_files; i++) {
2772  if (!output_files[i]->header_written)
2773  return;
2774  }
2775 
2776  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2777  if (!avc)
2778  exit_program(1);
 /* Collect only the RTP muxer contexts; j counts them. */
2779  for (i = 0, j = 0; i < nb_output_files; i++) {
2780  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2781  avc[j] = output_files[i]->ctx;
2782  j++;
2783  }
2784  }
2785 
2786  if (!j)
2787  goto fail;
2788 
2789  av_sdp_create(avc, j, sdp, sizeof(sdp));
2790 
2791  if (!sdp_filename) {
2792  printf("SDP:\n%s\n", sdp);
2793  fflush(stdout);
2794  } else {
2795  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2796  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2797  } else {
2798  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2799  avio_closep(&sdp_pb);
 /* NOTE(review): orig. line 2800 is missing here — presumably cleanup of
  * the sdp_filename string after a successful write; confirm upstream. */
2801  }
2802  }
2803 
2804 fail:
2805  av_freep(&avc);
2806 }
2807 
/* Body of get_hwaccel() — the signature line (orig. 2808) was dropped by the
 * extraction. Linear search of the global hwaccels[] table by pixel format;
 * returns the matching entry or NULL. */
2809 {
2810  int i;
2811  for (i = 0; hwaccels[i].name; i++)
2812  if (hwaccels[i].pix_fmt == pix_fmt)
2813  return &hwaccels[i];
2814  return NULL;
2815 }
2816 
/* Body of the get_format() decoder callback — the signature line (orig. 2817)
 * was dropped by the extraction. Walks the decoder's pix_fmt candidates,
 * picking the first hwaccel format we can initialize (honoring the
 * user-selected hwaccel), otherwise falling through to the first software
 * format. Returns the chosen format or AV_PIX_FMT_NONE on fatal failure. */
2818 {
2819  InputStream *ist = s->opaque;
2820  const enum AVPixelFormat *p;
2821  int ret;
2822 
2823  for (p = pix_fmts; *p != -1; p++) {
 /* NOTE(review): orig. line 2824 is missing here — presumably fetching the
  * AVPixFmtDescriptor for *p used in the flags test below. */
2825  const HWAccel *hwaccel;
2826 
 /* First non-hwaccel format terminates the search (software fallback). */
2827  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2828  break;
2829 
2830  hwaccel = get_hwaccel(*p);
 /* Skip hwaccels that don't match the active or requested one. */
2831  if (!hwaccel ||
2832  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2833  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2834  continue;
2835 
2836  ret = hwaccel->init(s);
2837  if (ret < 0) {
2838  if (ist->hwaccel_id == hwaccel->id) {
 /* NOTE(review): orig. line 2839 is missing here — presumably the
  * av_log() call opening continued by the strings below. */
2840  "%s hwaccel requested for input stream #%d:%d, "
2841  "but cannot be initialized.\n", hwaccel->name,
2842  ist->file_index, ist->st->index);
 /* Explicitly-requested hwaccel failing is fatal for this stream. */
2843  return AV_PIX_FMT_NONE;
2844  }
2845  continue;
2846  }
2847 
2848  if (ist->hw_frames_ctx) {
 /* NOTE(review): orig. line 2849 is missing here — presumably the
  * av_buffer_ref() installing the hw frames context on the codec. */
2850  if (!s->hw_frames_ctx)
2851  return AV_PIX_FMT_NONE;
2852  }
2853 
2854  ist->active_hwaccel_id = hwaccel->id;
2855  ist->hwaccel_pix_fmt = *p;
2856  break;
2857  }
2858 
2859  return *p;
2860 }
2861 
/* Body of the get_buffer2() decoder callback — the signature line (orig.
 * 2862) was dropped by the extraction. Delegates allocation to the active
 * hwaccel for its pixel format, otherwise uses the default allocator. */
2863 {
2864  InputStream *ist = s->opaque;
2865 
2866  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2867  return ist->hwaccel_get_buffer(s, frame, flags);
2868 
2869  return avcodec_default_get_buffer2(s, frame, flags);
2870 }
2871 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, apply decoder options, set up any
 * hardware device, and call avcodec_open2(). On failure an explanatory
 * message is written into 'error' and a negative AVERROR code returned. */
2872 static int init_input_stream(int ist_index, char *error, int error_len)
2873 {
2874  int ret;
2875  InputStream *ist = input_streams[ist_index];
2876 
2877  if (ist->decoding_needed) {
2878  AVCodec *codec = ist->dec;
2879  if (!codec) {
2880  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2881  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2882  return AVERROR(EINVAL);
2883  }
2884 
 /* Route decoder callbacks back to this InputStream via opaque. */
2885  ist->dec_ctx->opaque = ist;
2886  ist->dec_ctx->get_format = get_format;
2887  ist->dec_ctx->get_buffer2 = get_buffer;
2888  ist->dec_ctx->thread_safe_callbacks = 1;
2889 
2890  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2891  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2892  (ist->decoding_needed & DECODING_FOR_OST)) {
2893  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): orig. line 2894 is missing here — presumably the
  * DECODING_FOR_FILTER condition guarding the warning below. */
2895  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2896  }
2897 
2898  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2899 
2900  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2901  * audio, and video decoders such as cuvid or mediacodec */
 /* NOTE(review): orig. line 2902 is missing here — presumably the statement
  * this comment refers to (likely setting pkt_timebase); confirm upstream. */
2903 
2904  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2905  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2906 
2907  ret = hw_device_setup_for_decode(ist);
2908  if (ret < 0) {
2909  snprintf(error, error_len, "Device setup failed for "
2910  "decoder on input stream #%d:%d : %s",
2911  ist->file_index, ist->st->index, av_err2str(ret));
2912  return ret;
2913  }
2914 
2915  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2916  if (ret == AVERROR_EXPERIMENTAL)
2917  abort_codec_experimental(codec, 0);
2918 
2919  snprintf(error, error_len,
2920  "Error while opening decoder for input stream "
2921  "#%d:%d : %s",
2922  ist->file_index, ist->st->index, av_err2str(ret));
2923  return ret;
2924  }
 /* NOTE(review): orig. line 2925 is missing here — presumably the
  * assert_avoptions() check on leftover decoder options. */
2926  }
2927 
2928  ist->next_pts = AV_NOPTS_VALUE;
2929  ist->next_dts = AV_NOPTS_VALUE;
2930 
2931  return 0;
2932 }
2933 
/* Body of get_input_stream() — the signature line (orig. 2934) was dropped by
 * the extraction. Returns the InputStream an output stream copies/encodes
 * from, or NULL when the output has no direct source (e.g. filter-fed). */
2935 {
2936  if (ost->source_index >= 0)
2937  return input_streams[ost->source_index];
2938  return NULL;
2939 }
2940 
/* qsort() comparator ordering int64_t values ascending.
 * Returns -1, 0 or 1 as *a is less than, equal to, or greater than *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* Branch-free sign of the difference; avoids overflow of lhs - rhs. */
    return (lhs > rhs) - (lhs < rhs);
}
2945 
2946 /* open the muxer when all the streams are initialized */
2946 /* open the muxer when all the streams are initialized */
/* Write the header of output file 'file_index' once every one of its streams
 * is initialized, print the SDP if requested, then flush packets queued while
 * the muxer was not yet open. Returns 0 or a negative AVERROR code. */
2947 static int check_init_output_file(OutputFile *of, int file_index)
2948 {
2949  int ret, i;
2950 
 /* Any uninitialized stream means we are not ready yet — not an error. */
2951  for (i = 0; i < of->ctx->nb_streams; i++) {
2952  OutputStream *ost = output_streams[of->ost_index + i];
2953  if (!ost->initialized)
2954  return 0;
2955  }
2956 
2957  of->ctx->interrupt_callback = int_cb;
2958 
2959  ret = avformat_write_header(of->ctx, &of->opts);
2960  if (ret < 0) {
 /* NOTE(review): orig. line 2961 is missing here — presumably the av_log()
  * call opening continued by the strings below. */
2962  "Could not write header for output file #%d "
2963  "(incorrect codec parameters ?): %s\n",
2964  file_index, av_err2str(ret));
2965  return ret;
2966  }
2967  //assert_avoptions(of->opts);
2968  of->header_written = 1;
2969 
2970  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2971 
2972  if (sdp_filename || want_sdp)
2973  print_sdp();
2974 
2975  /* flush the muxing queues */
2976  for (i = 0; i < of->ctx->nb_streams; i++) {
2977  OutputStream *ost = output_streams[of->ost_index + i];
2978 
2979  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2980  if (!av_fifo_size(ost->muxing_queue))
2981  ost->mux_timebase = ost->st->time_base;
2982 
2983  while (av_fifo_size(ost->muxing_queue)) {
2984  AVPacket pkt;
2985  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2986  write_packet(of, &pkt, ost, 1);
2987  }
2988  }
2989 
2990  return 0;
2991 }
2992 
/* Body of init_output_bsfs() — the signature line (orig. 2993) was dropped by
 * the extraction. Chains the output stream's bitstream filters: copies codec
 * parameters and timebase through each filter, initializes them, then applies
 * the last filter's output parameters/timebase back onto the stream.
 * Returns 0 or a negative AVERROR code. */
2994 {
2995  AVBSFContext *ctx;
2996  int i, ret;
2997 
2998  if (!ost->nb_bitstream_filters)
2999  return 0;
3000 
3001  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3002  ctx = ost->bsf_ctx[i];
3003 
 /* Each filter's input is the previous filter's output (or the stream). */
3004  ret = avcodec_parameters_copy(ctx->par_in,
3005  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3006  if (ret < 0)
3007  return ret;
3008 
3009  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3010 
3011  ret = av_bsf_init(ctx);
3012  if (ret < 0) {
3013  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3014  ost->bsf_ctx[i]->filter->name);
3015  return ret;
3016  }
3017  }
3018 
 /* The chain's final output defines the muxed stream's parameters. */
3019  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3020  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3021  if (ret < 0)
3022  return ret;
3023 
3024  ost->st->time_base = ctx->time_base_out;
3025 
3026  return 0;
3027 }
3028 
/* Body of init_output_stream_streamcopy() — the signature line (orig. 3029)
 * was dropped by the extraction. Sets up an output stream for stream copy:
 * transfers codec parameters, codec tag, frame rate, timebase, duration,
 * disposition, side data and per-codec-type fixups from the input stream.
 * Returns 0 or a negative AVERROR code. */
3030 {
3031  OutputFile *of = output_files[ost->file_index];
3032  InputStream *ist = get_input_stream(ost);
3033  AVCodecParameters *par_dst = ost->st->codecpar;
3034  AVCodecParameters *par_src = ost->ref_par;
3035  AVRational sar;
3036  int i, ret;
3037  uint32_t codec_tag = par_dst->codec_tag;
3038 
 /* Stream copy requires a direct input stream and no filtergraph. */
3039  av_assert0(ist && !ost->filter);
3040 
3041  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3042  if (ret >= 0)
3043  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3044  if (ret < 0) {
 /* NOTE(review): orig. line 3045 is missing here — presumably the av_log()
  * call opening continued by the string below. */
3046  "Error setting up codec context options.\n");
3047  return ret;
3048  }
 /* NOTE(review): orig. line 3049 is missing here — presumably the call
  * copying the context back into par_src (ost->ref_par); confirm upstream. */
3050 
 /* Keep the source codec tag only when the target container accepts it. */
3051  if (!codec_tag) {
3052  unsigned int codec_tag_tmp;
3053  if (!of->ctx->oformat->codec_tag ||
3054  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3055  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3056  codec_tag = par_src->codec_tag;
3057  }
3058 
3059  ret = avcodec_parameters_copy(par_dst, par_src);
3060  if (ret < 0)
3061  return ret;
3062 
3063  par_dst->codec_tag = codec_tag;
3064 
3065  if (!ost->frame_rate.num)
3066  ost->frame_rate = ist->framerate;
3067  ost->st->avg_frame_rate = ost->frame_rate;
3068 
 /* NOTE(review): orig. line 3069 is missing here — presumably the call
  * transferring internal stream timing info that assigns ret. */
3070  if (ret < 0)
3071  return ret;
3072 
3073  // copy timebase while removing common factors
3074  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* NOTE(review): orig. line 3075 is missing here — presumably the
  * assignment deriving the output timebase; confirm upstream. */
3076 
3077  // copy estimated duration as a hint to the muxer
3078  if (ost->st->duration <= 0 && ist->st->duration > 0)
3079  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3080 
3081  // copy disposition
3082  ost->st->disposition = ist->st->disposition;
3083 
3084  if (ist->st->nb_side_data) {
3085  for (i = 0; i < ist->st->nb_side_data; i++) {
3086  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3087  uint8_t *dst_data;
3088 
3089  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3090  if (!dst_data)
3091  return AVERROR(ENOMEM);
3092  memcpy(dst_data, sd_src->data, sd_src->size);
3093  }
3094  }
3095 
3096  if (ost->rotate_overridden) {
 /* NOTE(review): orig. line 3097 is missing here — presumably the
  * av_stream_new_side_data() call whose size argument continues below. */
3098  sizeof(int32_t) * 9);
3099  if (sd)
3100  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3101  }
3102 
3103  ost->parser = av_parser_init(par_dst->codec_id);
3104  ost->parser_avctx = avcodec_alloc_context3(NULL);
3105  if (!ost->parser_avctx)
3106  return AVERROR(ENOMEM);
3107 
3108  switch (par_dst->codec_type) {
3109  case AVMEDIA_TYPE_AUDIO:
3110  if (audio_volume != 256) {
3111  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3112  exit_program(1);
3113  }
 /* MP3/AC3 block_align fixups for containers that store bogus values. */
3114  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3115  par_dst->block_align= 0;
3116  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3117  par_dst->block_align= 0;
3118  break;
3119  case AVMEDIA_TYPE_VIDEO:
3120  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3121  sar =
3122  av_mul_q(ost->frame_aspect_ratio,
3123  (AVRational){ par_dst->height, par_dst->width });
3124  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3125  "with stream copy may produce invalid files\n");
3126  }
3127  else if (ist->st->sample_aspect_ratio.num)
3128  sar = ist->st->sample_aspect_ratio;
3129  else
3130  sar = par_src->sample_aspect_ratio;
3131  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3132  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3133  ost->st->r_frame_rate = ist->st->r_frame_rate;
3134  break;
3135  }
3136 
3137  ost->mux_timebase = ist->st->time_base;
3138 
3139  return 0;
3140 }
3141 
/* Body of set_encoder_id() — the signature line (orig. 3142) was dropped by
 * the extraction. Writes the "encoder" metadata tag ("Lavc<version> <name>")
 * on the output stream unless one is already set, respecting bitexact mode
 * (which drops the version string). */
3143 {
3144  AVDictionaryEntry *e;
3145 
3146  uint8_t *encoder_string;
3147  int encoder_string_len;
3148  int format_flags = 0;
3149  int codec_flags = 0;
3150 
 /* User-provided encoder tag wins. */
3151  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3152  return;
3153 
 /* Evaluate -fflags / codec flags to detect bitexact mode. */
3154  e = av_dict_get(of->opts, "fflags", NULL, 0);
3155  if (e) {
3156  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3157  if (!o)
3158  return;
3159  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3160  }
3161  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3162  if (e) {
3163  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3164  if (!o)
3165  return;
3166  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3167  }
3168 
3169  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3170  encoder_string = av_mallocz(encoder_string_len);
3171  if (!encoder_string)
3172  exit_program(1);
3173 
 /* Bitexact output must not embed the library version. */
3174  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3175  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3176  else
3177  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3178  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3179  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): orig. line 3180 is missing here — presumably the
  * AV_DICT_DONT_STRDUP_VAL flag closing this call. */
3181 }
3182 
3183 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3184  AVCodecContext *avctx)
3185 {
3186  char *p;
3187  int n = 1, i, size, index = 0;
3188  int64_t t, *pts;
3189 
3190  for (p = kf; *p; p++)
3191  if (*p == ',')
3192  n++;
3193  size = n;
3194  pts = av_malloc_array(size, sizeof(*pts));
3195  if (!pts) {
3196  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3197  exit_program(1);
3198  }
3199 
3200  p = kf;
3201  for (i = 0; i < n; i++) {
3202  char *next = strchr(p, ',');
3203 
3204  if (next)
3205  *next++ = 0;
3206 
3207  if (!memcmp(p, "chapters", 8)) {
3208 
3209  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3210  int j;
3211 
3212  if (avf->nb_chapters > INT_MAX - size ||
3213  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3214  sizeof(*pts)))) {
3216  "Could not allocate forced key frames array.\n");
3217  exit_program(1);
3218  }
3219  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3220  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3221 
3222  for (j = 0; j < avf->nb_chapters; j++) {
3223  AVChapter *c = avf->chapters[j];
3224  av_assert1(index < size);
3225  pts[index++] = av_rescale_q(c->start, c->time_base,
3226  avctx->time_base) + t;
3227  }
3228 
3229  } else {
3230 
3231  t = parse_time_or_die("force_key_frames", p, 1);
3232  av_assert1(index < size);
3233  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3234 
3235  }
3236 
3237  p = next;
3238  }
3239 
3240  av_assert0(index == size);
3241  qsort(pts, size, sizeof(*pts), compare_int64);
3242  ost->forced_kf_count = size;
3243  ost->forced_kf_pts = pts;
3244 }
3245 
3246 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3247 {
3248  InputStream *ist = get_input_stream(ost);
3249  AVCodecContext *enc_ctx = ost->enc_ctx;
3250  AVFormatContext *oc;
3251 
3252  if (ost->enc_timebase.num > 0) {
3253  enc_ctx->time_base = ost->enc_timebase;
3254  return;
3255  }
3256 
3257  if (ost->enc_timebase.num < 0) {
3258  if (ist) {
3259  enc_ctx->time_base = ist->st->time_base;
3260  return;
3261  }
3262 
3263  oc = output_files[ost->file_index]->ctx;
3264  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3265  }
3266 
3267  enc_ctx->time_base = default_time_base;
3268 }
3269 
/* Body of init_output_stream_encode(ost): configure the encoder context of an
 * output stream (frame rate, time base, dimensions, pixel/sample parameters,
 * forced keyframes) before the encoder is opened.
 * NOTE(review): the signature line and a number of interior lines were lost
 * in this extraction; the gaps are marked below — confirm against the full
 * source before relying on this text. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* NOTE(review): the declaration of dec_ctx (assigned below) was lost in
     * extraction here. */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* Inherit disposition and chroma siting from the input stream. */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No input stream: check whether this is the only stream of its
         * media type in the output file. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
            /* NOTE(review): the remainder of this condition and its body were
             * lost in extraction. */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate selection, in decreasing priority: the -r option (set
         * earlier), then the fallbacks below. */
        if (!ost->frame_rate.num)
        /* NOTE(review): the assignment belonging to the condition above was
         * lost in extraction. */
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log() call head for this warning was lost
             * in extraction. */
            "No information "
            "about the input framerate is available. Falling "
            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
            "if you want a different framerate.\n",
            ost->file_index, ost->index);
        }
// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        /* Snap to the nearest frame rate the encoder supports, unless the
         * user forced the rate with -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): one or more sample-format/rate assignment lines were
         * lost in extraction here. */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        /* NOTE(review): further assignment lines lost in extraction here. */

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): the video init_encoder_time_base() call line was lost
         * in extraction here. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        /* NOTE(review): the assignment belonging to the condition above was
         * lost in extraction. */
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            /* NOTE(review): the remainder of this condition was lost in
             * extraction. */
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* Rescale user-requested forced keyframe times into the encoder
         * time base. */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 /* NOTE(review): the source
                                                  * time base argument line was
                                                  * lost in extraction. */
                                                 enc_ctx->time_base);

        /* Geometry and pixel format come from the filter graph sink. */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* NOTE(review): the fallback operand of this ternary was lost in
             * extraction. */

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* NOTE(review): the body of this branch was lost in extraction. */
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the expression-parse call lines were lost in
                 * extraction here. */
                if (ret < 0) {
                    /* NOTE(review): the av_log() call head was lost in
                     * extraction. */
                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): several lines were lost in extraction here. */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): the parse_forced_key_frames() call line was
                 * lost in extraction. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Default subtitle canvas to the input video dimensions. */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3421 
/* Fully initialize one output stream: open its encoder (or perform streamcopy
 * setup), propagate side data and timebase/duration hints, apply user
 * -disposition flags, and init bitstream filters; finally try to write the
 * output file header if all its streams are now ready.
 * Returns 0 on success, a negative AVERROR on failure (error/error_len carry
 * a human-readable message for some failures).
 * NOTE(review): several interior lines were lost in this extraction; gaps are
 * marked below. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the subtitle_header allocation line was lost in
             * extraction here. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): the subtitle_header_size assignment line was lost
             * in extraction here. */
        }
        /* Default to automatic threading unless the user chose otherwise. */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate of 128k when the encoder has no defaults and
         * the user specified none. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* NOTE(review): the remainder of this condition and the
             * hw_frames_ctx reference assignment were lost in extraction. */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* NOTE(review): the call line taking this argument (buffersink
             * frame size setup) was lost in extraction. */
            ost->enc_ctx->frame_size);
        /* NOTE(review): a line was lost in extraction here. */
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                   " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): the call whose result is checked below was lost in
         * extraction here. */
        if (ret < 0) {
            /* NOTE(review): the av_log() call head was lost in extraction. */
            "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* Propagate encoder-produced global side data to the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* Autorotation was already applied by the filter chain, so
                 * neutralize the display matrix rotation on the output. */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3604 
3605 static void report_new_stream(int input_index, AVPacket *pkt)
3606 {
3607  InputFile *file = input_files[input_index];
3608  AVStream *st = file->ctx->streams[pkt->stream_index];
3609 
3610  if (pkt->stream_index < file->nb_streams_warn)
3611  return;
3612  av_log(file->ctx, AV_LOG_WARNING,
3613  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3615  input_index, pkt->stream_index,
3616  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3617  file->nb_streams_warn = pkt->stream_index + 1;
3618 }
3619 
/* One-time setup before the main transcode loop: resolve filtergraph output
 * sources, init rate emulation, open decoders and (non-filtered) encoders,
 * discard unused programs, write headers for stream-less outputs, and dump
 * the stream mapping. Returns 0 on success or a negative AVERROR. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* For single-input complex filtergraph outputs, point the output stream
     * at the input stream feeding the graph. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* On failure, close any encoders before reporting. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep a program if any of its streams is actually used. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            /* "native" means the decoder name equals the codec name, i.e. no
             * external/alternative implementation was chosen. */
            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a line is missing here in this extraction — the full
     * source records that initialization completed before returning;
     * confirm against the original file. */

    return 0;
}
3791 
3792 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3793 static int need_output(void)
3794 {
3795  int i;
3796 
3797  for (i = 0; i < nb_output_streams; i++) {
3798  OutputStream *ost = output_streams[i];
3799  OutputFile *of = output_files[ost->file_index];
3800  AVFormatContext *os = output_files[ost->file_index]->ctx;
3801 
3802  if (ost->finished ||
3803  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3804  continue;
3805  if (ost->frame_number >= ost->max_frames) {
3806  int j;
3807  for (j = 0; j < of->ctx->nb_streams; j++)
3808  close_output_stream(output_streams[of->ost_index + j]);
3809  continue;
3810  }
3811 
3812  return 1;
3813  }
3814 
3815  return 0;
3816 }
3817 
3818 /**
3819  * Select the output stream to process.
3820  *
3821  * @return selected output stream, or NULL if none available
3822  */
/* Body of choose_output(): pick the output stream whose muxed DTS is
 * smallest (i.e. lagging furthest behind), keeping interleaving balanced.
 * NOTE(review): the signature line (static OutputStream *choose_output(void))
 * was lost in this extraction. */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* Streams without a DTS yet sort first (INT64_MIN). */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        /* A stream that is not yet initialized (and still has input) is
         * returned immediately. */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            /* An unavailable (EAGAIN) stream blocks selection this round. */
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3847 
/* Turn terminal echo on stdin on (on != 0) or off; no-op without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3859 
/* Body of check_keyboard_interaction(cur_time): poll the keyboard at most
 * every 100 ms and act on single-key commands (quit, verbosity, QP histogram,
 * packet dumping, filter commands, debug flags, help). Returns AVERROR_EXIT
 * to request termination, 0 otherwise.
 * NOTE(review): the signature line was lost in this extraction; 'cur_time'
 * is the caller-supplied current time in microseconds. */
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): a line was lost in extraction here. */
    }
    /* 'c'/'C': send (or queue) a command to running filtergraphs. */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* Immediate command; 'c' stops at the first filter
                         * that accepts it, 'C' addresses all matches. */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the head of this av_log() call was lost in
             * extraction. */
            "Parse error, at least 3 arguments were expected, "
            "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles preset debug modes; 'd' prompts for a numeric value. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* Apply the chosen debug flags to every decoder and encoder. */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                "? show this help\n"
                "+ increase verbosity\n"
                "- decrease verbosity\n"
                "c Send command to first matching filter supporting it\n"
                "C Send/Queue command to all matching filters\n"
                "D cycle through available debug modes\n"
                "h dump packets/hex press to cycle through the 3 states\n"
                "q quit\n"
                "s Show QP histogram\n"
        );
    }
    return 0;
}
3972 
3973 #if HAVE_PTHREADS
3974 static void *input_thread(void *arg)
3975 {
3976  InputFile *f = arg;
3977  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3978  int ret = 0;
3979 
3980  while (1) {
3981  AVPacket pkt;
3982  ret = av_read_frame(f->ctx, &pkt);
3983 
3984  if (ret == AVERROR(EAGAIN)) {
3985  av_usleep(10000);
3986  continue;
3987  }
3988  if (ret < 0) {
3989  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3990  break;
3991  }
3992  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3993  if (flags && ret == AVERROR(EAGAIN)) {
3994  flags = 0;
3995  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3997  "Thread message queue blocking; consider raising the "
3998  "thread_queue_size option (current value: %d)\n",
3999  f->thread_queue_size);
4000  }
4001  if (ret < 0) {
4002  if (ret != AVERROR_EOF)
4003  av_log(f->ctx, AV_LOG_ERROR,
4004  "Unable to send packet to main thread: %s\n",
4005  av_err2str(ret));
4006  av_packet_unref(&pkt);
4007  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4008  break;
4009  }
4010  }
4011 
4012  return NULL;
4013 }
4014 
4015 static void free_input_threads(void)
4016 {
4017  int i;
4018 
4019  for (i = 0; i < nb_input_files; i++) {
4020  InputFile *f = input_files[i];
4021  AVPacket pkt;
4022 
4023  if (!f || !f->in_thread_queue)
4024  continue;
4026  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4027  av_packet_unref(&pkt);
4028 
4029  pthread_join(f->thread, NULL);
4030  f->joined = 1;
4031  av_thread_message_queue_free(&f->in_thread_queue);
4032  }
4033 }
4034 
4035 static int init_input_threads(void)
4036 {
4037  int i, ret;
4038 
4039  if (nb_input_files == 1)
4040  return 0;
4041 
4042  for (i = 0; i < nb_input_files; i++) {
4043  InputFile *f = input_files[i];
4044 
4045  if (f->ctx->pb ? !f->ctx->pb->seekable :
4046  strcmp(f->ctx->iformat->name, "lavfi"))
4047  f->non_blocking = 1;
4048  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4049  f->thread_queue_size, sizeof(AVPacket));
4050  if (ret < 0)
4051  return ret;
4052 
4053  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4054  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4055  av_thread_message_queue_free(&f->in_thread_queue);
4056  return AVERROR(ret);
4057  }
4058  }
4059  return 0;
4060 }
4061 
4062 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4063 {
4064  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4065  f->non_blocking ?
4067 }
4068 #endif
4069 
/* Body of get_input_packet(f, pkt): fetch the next packet of an input file,
 * honouring -re rate emulation and using the threaded reader when more than
 * one input file is open.
 * NOTE(review): the signature line (static int get_input_packet(InputFile *f,
 * AVPacket *pkt)) was lost in this extraction. */
{
    if (f->rate_emu) {
        int i;
        /* -re: do not deliver a packet before its DTS in wall-clock time. */
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4089 
4090 static int got_eagain(void)
4091 {
4092  int i;
4093  for (i = 0; i < nb_output_streams; i++)
4094  if (output_streams[i]->unavailable)
4095  return 1;
4096  return 0;
4097 }
4098 
4099 static void reset_eagain(void)
4100 {
4101  int i;
4102  for (i = 0; i < nb_input_files; i++)
4103  input_files[i]->eagain = 0;
4104  for (i = 0; i < nb_output_streams; i++)
4105  output_streams[i]->unavailable = 0;
4106 }
4107 
4108 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4109 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4110  AVRational time_base)
4111 {
4112  int ret;
4113 
4114  if (!*duration) {
4115  *duration = tmp;
4116  return tmp_time_base;
4117  }
4118 
4119  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4120  if (ret < 0) {
4121  *duration = tmp;
4122  return tmp_time_base;
4123  }
4124 
4125  return time_base;
4126 }
4127 
/* Body of seek_to_start(ifile, is): rewind an input file for -stream_loop,
 * flush its decoders, and compute the duration of the finished pass so the
 * next pass's timestamps can be offset beyond it. Returns the av_seek_frame()
 * result (negative AVERROR on failure).
 * NOTE(review): the signature line (static int seek_to_start(InputFile *ifile,
 * AVFormatContext *is)) was lost in this extraction. */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* Last-frame length: decoded samples in the stream time base. */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* No audio: approximate the last frame as one frame interval. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4188 
4189 /*
4190  * Return
4191  * - 0 -- one packet was read and processed
4192  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4193  * this function should be called again
4194  * - AVERROR_EOF -- this function should not be called again
4195  */
4196 static int process_input(int file_index)
4197 {
4198  InputFile *ifile = input_files[file_index];
4199  AVFormatContext *is;
4200  InputStream *ist;
4201  AVPacket pkt;
4202  int ret, i, j;
4203  int64_t duration;
4204  int64_t pkt_dts;
4205 
4206  is = ifile->ctx;
4207  ret = get_input_packet(ifile, &pkt);
4208 
 // no packet available right now; remember the EAGAIN so got_eagain() sees it
4209  if (ret == AVERROR(EAGAIN)) {
4210  ifile->eagain = 1;
4211  return ret;
4212  }
 // read failure/EOF with -stream_loop pending: rewind the file and retry once
4213  if (ret < 0 && ifile->loop) {
4214  if ((ret = seek_to_start(ifile, is)) < 0)
4215  return ret;
4216  ret = get_input_packet(ifile, &pkt);
4217  if (ret == AVERROR(EAGAIN)) {
4218  ifile->eagain = 1;
4219  return ret;
4220  }
4221  }
4222  if (ret < 0) {
4223  if (ret != AVERROR_EOF) {
4224  print_error(is->filename, ret);
4225  if (exit_on_error)
4226  exit_program(1);
4227  }
4228 
 // real end of this input file: flush every decoder attached to it
4229  for (i = 0; i < ifile->nb_streams; i++) {
4230  ist = input_streams[ifile->ist_index + i];
4231  if (ist->decoding_needed) {
4232  ret = process_input_packet(ist, NULL, 0);
4233  if (ret>0)
4234  return 0;
4235  }
4236 
4237  /* mark all outputs that don't go through lavfi as finished */
4238  for (j = 0; j < nb_output_streams; j++) {
4239  OutputStream *ost = output_streams[j];
4240 
4241  if (ost->source_index == ifile->ist_index + i &&
4242  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4243  finish_output_stream(ost);
4244  }
4245  }
4246 
4247  ifile->eof_reached = 1;
4248  return AVERROR(EAGAIN);
4249  }
4250 
4251  reset_eagain();
4252 
4253  if (do_pkt_dump) {
 // NOTE(review): the av_pkt_dump_log2() call line is missing from this view
4255  is->streams[pkt.stream_index]);
4256  }
4257  /* the following test is needed in case new streams appear
4258  dynamically in stream : we ignore them */
4259  if (pkt.stream_index >= ifile->nb_streams) {
4260  report_new_stream(file_index, &pkt);
4261  goto discard_packet;
4262  }
4263 
4264  ist = input_streams[ifile->ist_index + pkt.stream_index];
4265 
 // per-stream statistics used by the final report
4266  ist->data_size += pkt.size;
4267  ist->nb_packets++;
4268 
4269  if (ist->discard)
4270  goto discard_packet;
4271 
4272  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4273  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4274  exit_program(1);
4275  }
4276 
4277  if (debug_ts) {
4278  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4279  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4283  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4284  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4285  av_ts2str(input_files[ist->file_index]->ts_offset),
4286  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4287  }
4288 
4289  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4290  int64_t stime, stime2;
4291  // Correcting starttime based on the enabled streams
4292  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4293  // so we instead do it here as part of discontinuity handling
4294  if ( ist->next_dts == AV_NOPTS_VALUE
4295  && ifile->ts_offset == -is->start_time
4296  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4297  int64_t new_start_time = INT64_MAX;
4298  for (i=0; i<is->nb_streams; i++) {
4299  AVStream *st = is->streams[i];
4300  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4301  continue;
4302  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4303  }
4304  if (new_start_time > is->start_time) {
4305  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4306  ifile->ts_offset = -new_start_time;
4307  }
4308  }
4309 
4310  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4311  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4312  ist->wrap_correction_done = 1;
4313 
 // undo a timestamp wraparound when a packet lies more than half the wrap range past start
4314  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4315  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4316  ist->wrap_correction_done = 0;
4317  }
4318  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4319  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4320  ist->wrap_correction_done = 0;
4321  }
4322  }
4323 
4324  /* add the stream-global side data to the first packet */
4325  if (ist->nb_packets == 1) {
4326  for (i = 0; i < ist->st->nb_side_data; i++) {
4327  AVPacketSideData *src_sd = &ist->st->side_data[i];
4328  uint8_t *dst_data;
4329 
4330  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4331  continue;
4332 
 // don't overwrite side data the packet already carries
4333  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4334  continue;
4335 
4336  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4337  if (!dst_data)
4338  exit_program(1);
4339 
4340  memcpy(dst_data, src_sd->data, src_sd->size);
4341  }
4342  }
4343 
 // apply the per-file timestamp offset, then the -itsscale factor
4344  if (pkt.dts != AV_NOPTS_VALUE)
4345  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4346  if (pkt.pts != AV_NOPTS_VALUE)
4347  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4348 
4349  if (pkt.pts != AV_NOPTS_VALUE)
4350  pkt.pts *= ist->ts_scale;
4351  if (pkt.dts != AV_NOPTS_VALUE)
4352  pkt.dts *= ist->ts_scale;
4353 
 // NOTE(review): the pkt_dts assignment and part of this condition are missing from this view
4355  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4357  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4358  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4359  int64_t delta = pkt_dts - ifile->last_ts;
4360  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4361  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4362  ifile->ts_offset -= delta;
4364  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4365  delta, ifile->ts_offset);
4366  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4367  if (pkt.pts != AV_NOPTS_VALUE)
4368  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4369  }
4370  }
4371 
 // when looping, shift timestamps forward by the accumulated file duration
4372  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4373  if (pkt.pts != AV_NOPTS_VALUE) {
4374  pkt.pts += duration;
4375  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4376  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4377  }
4378 
4379  if (pkt.dts != AV_NOPTS_VALUE)
4380  pkt.dts += duration;
4381 
 // NOTE(review): lines of this condition are missing from this view as well
4383  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4385  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4386  !copy_ts) {
4387  int64_t delta = pkt_dts - ist->next_dts;
4388  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4389  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4390  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4391  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4392  ifile->ts_offset -= delta;
4394  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4395  delta, ifile->ts_offset);
4396  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4397  if (pkt.pts != AV_NOPTS_VALUE)
4398  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4399  }
4400  } else {
 // non-discontinuous formats: drop out-of-range timestamps instead of resyncing
4401  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4402  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4403  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4404  pkt.dts = AV_NOPTS_VALUE;
4405  }
4406  if (pkt.pts != AV_NOPTS_VALUE){
4407  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4408  delta = pkt_pts - ist->next_dts;
4409  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4410  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4411  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4412  pkt.pts = AV_NOPTS_VALUE;
4413  }
4414  }
4415  }
4416  }
4417 
 // remember the last DTS for the inter-stream discontinuity check above
4418  if (pkt.dts != AV_NOPTS_VALUE)
4419  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4420 
4421  if (debug_ts) {
4422  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4424  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4425  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4426  av_ts2str(input_files[ist->file_index]->ts_offset),
4427  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4428  }
4429 
4430  sub2video_heartbeat(ist, pkt.pts);
4431 
4432  process_input_packet(ist, &pkt, 0);
4433 
4434 discard_packet:
4435  av_packet_unref(&pkt);
4436 
4437  return 0;
4438 }
4439 
4440 /**
4441  * Perform a step of transcoding for the specified filter graph.
4442  *
4443  * @param[in] graph filter graph to consider
4444  * @param[out] best_ist input stream where a frame would allow to continue
4445  * @return 0 for success, <0 for error
4446  */
4447 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4448 {
4449  int i, ret;
4450  int nb_requests, nb_requests_max = 0;
4451  InputFilter *ifilter;
4452  InputStream *ist;
4453 
4454  *best_ist = NULL;
4455  ret = avfilter_graph_request_oldest(graph->graph);
4456  if (ret >= 0)
4457  return reap_filters(0);
4458 
4459  if (ret == AVERROR_EOF) {
4460  ret = reap_filters(1);
4461  for (i = 0; i < graph->nb_outputs; i++)
4462  close_output_stream(graph->outputs[i]->ost);
4463  return ret;
4464  }
4465  if (ret != AVERROR(EAGAIN))
4466  return ret;
4467 
4468  for (i = 0; i < graph->nb_inputs; i++) {
4469  ifilter = graph->inputs[i];
4470  ist = ifilter->ist;
4471  if (input_files[ist->file_index]->eagain ||
4472  input_files[ist->file_index]->eof_reached)
4473  continue;
4474  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4475  if (nb_requests > nb_requests_max) {
4476  nb_requests_max = nb_requests;
4477  *best_ist = ist;
4478  }
4479  }
4480 
4481  if (!*best_ist)
4482  for (i = 0; i < graph->nb_outputs; i++)
4483  graph->outputs[i]->ost->unavailable = 1;
4484 
4485  return 0;
4486 }
4487 
4488 /**
4489  * Run a single step of transcoding.
4490  *
4491  * @return 0 for success, <0 for error
4492  */
4493 static int transcode_step(void)
4494 {
4495  OutputStream *ost;
4496  InputStream *ist = NULL;
4497  int ret;
4498 
 // pick the output stream that is furthest behind; NULL means none is ready
4499  ost = choose_output();
4500  if (!ost) {
4501  if (got_eagain()) {
4502  reset_eagain();
4503  av_usleep(10000);
4504  return 0;
4505  }
4506  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4507  return AVERROR_EOF;
4508  }
4509 
 // filtergraph not configured yet
 // NOTE(review): an inner condition line is missing from this view
 // (the extra closing brace below belongs to it)
4510  if (ost->filter && !ost->filter->graph->graph) {
4512  ret = configure_filtergraph(ost->filter->graph);
4513  if (ret < 0) {
4514  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4515  return ret;
4516  }
4517  }
4518  }
4519 
4520  if (ost->filter && ost->filter->graph->graph) {
4521  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4522  return ret;
4523  if (!ist)
4524  return 0;
4525  } else if (ost->filter) {
 // graph exists but is still unconfigured: feed any input without output yet
4526  int i;
4527  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4528  InputFilter *ifilter = ost->filter->graph->inputs[i];
4529  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4530  ist = ifilter->ist;
4531  break;
4532  }
4533  }
4534  if (!ist) {
4535  ost->inputs_done = 1;
4536  return 0;
4537  }
4538  } else {
 // no filtergraph (stream copy): read directly from the output's source stream
4539  av_assert0(ost->source_index >= 0);
4540  ist = input_streams[ost->source_index];
4541  }
4542 
4543  ret = process_input(ist->file_index);
4544  if (ret == AVERROR(EAGAIN)) {
4545  if (input_files[ist->file_index]->eagain)
4546  ost->unavailable = 1;
4547  return 0;
4548  }
4549 
 // EOF from process_input is not fatal here; the flush path handles it
4550  if (ret < 0)
4551  return ret == AVERROR_EOF ? 0 : ret;
4552 
4553  return reap_filters(0);
4554 }
4555 
4556 /*
4557  * The following code is the main loop of the file converter
4558  */
4559 static int transcode(void)
4560 {
4561  int ret, i;
4562  AVFormatContext *os;
4563  OutputStream *ost;
4564  InputStream *ist;
4565  int64_t timer_start;
4566  int64_t total_packets_written = 0;
4567 
4568  ret = transcode_init();
4569  if (ret < 0)
4570  goto fail;
4571 
4572  if (stdin_interaction) {
4573  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4574  }
4575 
4576  timer_start = av_gettime_relative();
4577 
4578 #if HAVE_PTHREADS
4579  if ((ret = init_input_threads()) < 0)
4580  goto fail;
4581 #endif
4582 
4583  while (!received_sigterm) {
4584  int64_t cur_time= av_gettime_relative();
4585 
4586  /* if 'q' pressed, exits */
4587  if (stdin_interaction)
4588  if (check_keyboard_interaction(cur_time) < 0)
4589  break;
4590 
4591  /* check if there's any stream where output is still needed */
4592  if (!need_output()) {
4593  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4594  break;
4595  }
4596 
4597  ret = transcode_step();
4598  if (ret < 0 && ret != AVERROR_EOF) {
4599  char errbuf[128];
4600  av_strerror(ret, errbuf, sizeof(errbuf));
4601 
4602  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4603  break;
4604  }
4605 
4606  /* dump report by using the output first video and audio streams */
4607  print_report(0, timer_start, cur_time);
4608  }
4609 #if HAVE_PTHREADS
4610  free_input_threads();
4611 #endif
4612 
4613  /* at the end of stream, we must flush the decoder buffers */
4614  for (i = 0; i < nb_input_streams; i++) {
4615  ist = input_streams[i];
4616  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4617  process_input_packet(ist, NULL, 0);
4618  }
4619  }
4620  flush_encoders();
4621 
4622  term_exit();
4623 
4624  /* write the trailer if needed and close file */
4625  for (i = 0; i < nb_output_files; i++) {
4626  os = output_files[i]->ctx;
4627  if (!output_files[i]->header_written) {
4629  "Nothing was written into output file %d (%s), because "
4630  "at least one of its streams received no packets.\n",
4631  i, os->filename);
4632  continue;
4633  }
4634  if ((ret = av_write_trailer(os)) < 0) {
4635  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4636  if (exit_on_error)
4637  exit_program(1);
4638  }
4639  }
4640 
4641  /* dump report by using the first video and audio streams */
4642  print_report(1, timer_start, av_gettime_relative());
4643 
4644  /* close each encoder */
4645  for (i = 0; i < nb_output_streams; i++) {
4646  ost = output_streams[i];
4647  if (ost->encoding_needed) {
4648  av_freep(&ost->enc_ctx->stats_in);
4649  }
4650  total_packets_written += ost->packets_written;
4651  }
4652 
4653  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4654  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4655  exit_program(1);
4656  }
4657 
4658  /* close each decoder */
4659  for (i = 0; i < nb_input_streams; i++) {
4660  ist = input_streams[i];
4661  if (ist->decoding_needed) {
4662  avcodec_close(ist->dec_ctx);
4663  if (ist->hwaccel_uninit)
4664  ist->hwaccel_uninit(ist->dec_ctx);
4665  }
4666  }
4667 
4670 
4671  /* finished ! */
4672  ret = 0;
4673 
4674  fail:
4675 #if HAVE_PTHREADS
4676  free_input_threads();
4677 #endif
4678 
4679  if (output_streams) {
4680  for (i = 0; i < nb_output_streams; i++) {
4681  ost = output_streams[i];
4682  if (ost) {
4683  if (ost->logfile) {
4684  if (fclose(ost->logfile))
4686  "Error closing logfile