FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
130 
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
136 
137 static int want_sdp = 1;
138 
139 static int current_time;
141 
143 
148 
153 
156 
157 #if HAVE_TERMIOS_H
158 
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163 
164 #if HAVE_PTHREADS
165 static void free_input_threads(void);
166 #endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
174 {
175  int ret;
176  AVFrame *frame = ist->sub2video.frame;
177 
178  av_frame_unref(frame);
179  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
182  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183  return ret;
184  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
/* Push the current sub2video canvas frame, stamped with the given pts, into
 * every filtergraph input fed by this subtitle stream, and remember the pts
 * as last_pts so heartbeats are not re-sent for older timestamps. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 {
221  AVFrame *frame = ist->sub2video.frame;
222  int i;
223 
224  av_assert1(frame->data[0]);
225  ist->sub2video.last_pts = frame->pts = pts;
226  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (original lines 227-229, presumably an
 * av_buffersrc_add_frame* call per filter) was elided by the doc
 * extraction — restore it from the repository copy of ffmpeg.c. */
230 }
231 
233 {
234  AVFrame *frame = ist->sub2video.frame;
235  int8_t *dst;
236  int dst_linesize;
237  int num_rects, i;
238  int64_t pts, end_pts;
239 
240  if (!frame)
241  return;
242  if (sub) {
243  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244  AV_TIME_BASE_Q, ist->st->time_base);
245  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  num_rects = sub->num_rects;
248  } else {
249  pts = ist->sub2video.end_pts;
250  end_pts = INT64_MAX;
251  num_rects = 0;
252  }
253  if (sub2video_get_blank_frame(ist) < 0) {
255  "Impossible to get a blank canvas.\n");
256  return;
257  }
258  dst = frame->data [0];
259  dst_linesize = frame->linesize[0];
260  for (i = 0; i < num_rects; i++)
261  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262  sub2video_push_ref(ist, pts);
263  ist->sub2video.end_pts = end_pts;
264 }
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
295 static void sub2video_flush(InputStream *ist)
296 {
297  int i;
298 
299  if (ist->sub2video.end_pts < INT64_MAX)
300  sub2video_update(ist, NULL);
301  for (i = 0; i < ist->nb_filters; i++)
303 }
304 
305 /* end of sub2video hack */
306 
/* Async-signal-safe terminal restoration: put the tty back into the mode
 * saved by term_init(). Safe to call from a signal handler (tcsetattr only). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
314 
315 void term_exit(void)
316 {
317  av_log(NULL, AV_LOG_QUIET, "%s", "");
319 }
320 
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
326 
327 static void
329 {
330  received_sigterm = sig;
333  if(received_nb_signals > 3) {
334  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335  strlen("Received > 3 system signals, hard exiting\n"));
336 
337  exit(123);
338  }
339 }
340 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler so both platforms share one shutdown path. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        /* interactive interrupt -> behave like SIGINT */
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
372 
373 void term_init(void)
374 {
375 #if HAVE_TERMIOS_H
377  struct termios tty;
378  if (tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
407 /* read a key without blocking */
/* Poll for a single keypress without blocking.
 * Returns the key byte, 0 on EOF of stdin (termios path), or -1 when no
 * input is available (or the read failed). Three platform variants:
 * termios+select (POSIX), PeekNamedPipe (Windows GUI pipes), kbhit (Windows
 * console). */
408 static int read_key(void)
409 {
410  unsigned char ch;
411 #if HAVE_TERMIOS_H
412  int n = 1;
413  struct timeval tv;
414  fd_set rfds;
415 
416  FD_ZERO(&rfds);
417  FD_SET(0, &rfds);
/* zero timeout: select() is a pure readiness poll, never blocks */
418  tv.tv_sec = 0;
419  tv.tv_usec = 0;
420  n = select(1, &rfds, NULL, NULL, &tv);
421  if (n > 0) {
422  n = read(0, &ch, 1);
423  if (n == 1)
424  return ch;
425 
/* read() returned 0 (EOF) or -1 (error); propagate it as-is */
426  return n;
427  }
428 #elif HAVE_KBHIT
429 # if HAVE_PEEKNAMEDPIPE
430  static int is_pipe;
431  static HANDLE input_handle;
432  DWORD dw, nchars;
/* lazily detect whether stdin is a console or a pipe (GUI launcher) */
433  if(!input_handle){
434  input_handle = GetStdHandle(STD_INPUT_HANDLE);
435  is_pipe = !GetConsoleMode(input_handle, &dw);
436  }
437 
438  if (is_pipe) {
439  /* When running under a GUI, you will end here. */
440  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441  // input pipe may have been closed by the program that ran ffmpeg
442  return -1;
443  }
444  //Read it
445  if(nchars != 0) {
/* NOTE(review): read() return value is ignored here; on a short/failed
 * read, ch is returned uninitialized — consider checking it. */
446  read(0, &ch, 1);
447  return ch;
448  }else{
449  return -1;
450  }
451  }
452 # endif
453  if(kbhit())
454  return(getch());
455 #endif
456  return -1;
457 }
458 
459 static int decode_interrupt_cb(void *ctx)
460 {
462 }
463 
465 
/* Global teardown, registered as the exit_program() callback: frees all
 * filtergraphs, output files/streams, input files/streams and option
 * dictionaries, closes the vstats file, and reports the final status.
 * NOTE(review): the doc extraction elided a number of lines in this function
 * (e.g. 477, 489-495, 514, 519, 525, 540-559, 581-591, 597-601, 610);
 * gaps are flagged inline — restore them from the repository copy. */
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
/* free every filtergraph: drain queued frames/subtitles, then inputs,
 * outputs and the graph description */
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
478  for (j = 0; j < fg->nb_inputs; j++) {
479  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480  AVFrame *frame;
481  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482  sizeof(frame), NULL);
483  av_frame_free(&frame);
484  }
485  av_fifo_freep(&fg->inputs[j]->frame_queue);
486  if (fg->inputs[j]->ist->sub2video.sub_queue) {
487  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488  AVSubtitle sub;
/* NOTE(review): line 489 (the av_fifo_generic_read call head) elided */
490  &sub, sizeof(sub), NULL);
491  avsubtitle_free(&sub);
492  }
/* NOTE(review): line 493 (freeing the sub_queue fifo) elided */
494  }
/* NOTE(review): line 495 elided */
496  av_freep(&fg->inputs[j]->name);
497  av_freep(&fg->inputs[j]);
498  }
499  av_freep(&fg->inputs);
500  for (j = 0; j < fg->nb_outputs; j++) {
501  av_freep(&fg->outputs[j]->name);
502  av_freep(&fg->outputs[j]->formats);
503  av_freep(&fg->outputs[j]->channel_layouts);
504  av_freep(&fg->outputs[j]->sample_rates);
505  av_freep(&fg->outputs[j]);
506  }
507  av_freep(&fg->outputs);
508  av_freep(&fg->graph_desc);
509 
510  av_freep(&filtergraphs[i]);
511  }
512  av_freep(&filtergraphs);
513 
515 
516  /* close files */
517  for (i = 0; i < nb_output_files; i++) {
518  OutputFile *of = output_files[i];
/* NOTE(review): line 519 (declaration of AVFormatContext *s) elided */
520  if (!of)
521  continue;
522  s = of->ctx;
523  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524  avio_closep(&s->pb);
/* NOTE(review): line 525 (avformat_free_context(s), presumably) elided */
526  av_dict_free(&of->opts);
527 
528  av_freep(&output_files[i]);
529  }
530  for (i = 0; i < nb_output_streams; i++) {
531  OutputStream *ost = output_streams[i];
532 
533  if (!ost)
534  continue;
535 
536  for (j = 0; j < ost->nb_bitstream_filters; j++)
537  av_bsf_free(&ost->bsf_ctx[j]);
538  av_freep(&ost->bsf_ctx);
539 
541  av_frame_free(&ost->last_frame);
542  av_dict_free(&ost->encoder_opts);
543 
544  av_parser_close(ost->parser);
546 
547  av_freep(&ost->forced_keyframes);
549  av_freep(&ost->avfilter);
550  av_freep(&ost->logfile_prefix);
551 
553  ost->audio_channels_mapped = 0;
554 
555  av_dict_free(&ost->sws_dict);
556 
559 
560  if (ost->muxing_queue) {
561  while (av_fifo_size(ost->muxing_queue)) {
562  AVPacket pkt;
563  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564  av_packet_unref(&pkt);
565  }
/* NOTE(review): line 566 (av_fifo_freep of the muxing queue) elided */
567  }
568 
569  av_freep(&output_streams[i]);
570  }
571 #if HAVE_PTHREADS
572  free_input_threads();
573 #endif
574  for (i = 0; i < nb_input_files; i++) {
575  avformat_close_input(&input_files[i]->ctx);
576  av_freep(&input_files[i]);
577  }
578  for (i = 0; i < nb_input_streams; i++) {
579  InputStream *ist = input_streams[i];
580 
/* NOTE(review): lines 581-582 (freeing decoded_frame/filter_frame) elided */
583  av_dict_free(&ist->decoder_opts);
/* NOTE(review): lines 584-585 (sub2video frame/subtitle cleanup) elided */
586  av_freep(&ist->filters);
587  av_freep(&ist->hwaccel_device);
588  av_freep(&ist->dts_buffer);
589 
/* NOTE(review): line 590 (avcodec_free_context(&ist->dec_ctx)) elided */
591 
592  av_freep(&input_streams[i]);
593  }
594 
595  if (vstats_file) {
596  if (fclose(vstats_file))
/* NOTE(review): line 597 (the av_log call head) elided */
598  "Error closing vstats file, loss of information possible: %s\n",
599  av_err2str(AVERROR(errno)));
600  }
602 
603  av_freep(&input_streams);
604  av_freep(&input_files);
605  av_freep(&output_streams);
606  av_freep(&output_files);
607 
608  uninit_opts();
609 
611 
/* final status message: distinguish signal-triggered exit from failure */
612  if (received_sigterm) {
613  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614  (int) received_sigterm);
615  } else if (ret && atomic_load(&transcode_init_done)) {
616  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
617  }
618  term_exit();
/* lets the Windows CtrlHandler stall-loop know cleanup has finished */
619  ffmpeg_exited = 1;
620 }
621 
623 {
624  AVDictionaryEntry *t = NULL;
625 
626  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628  }
629 }
630 
632 {
634  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
636  exit_program(1);
637  }
638 }
639 
/* Terminate the program when an experimental codec was requested without
 * the required opt-in. Both parameters are currently unused here (the
 * explanatory message is printed by the caller). Never returns. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
641 {
642  exit_program(1);
643 }
644 
645 static void update_benchmark(const char *fmt, ...)
646 {
647  if (do_benchmark_all) {
648  int64_t t = getutime();
649  va_list va;
650  char buf[1024];
651 
652  if (fmt) {
653  va_start(va, fmt);
654  vsnprintf(buf, sizeof(buf), fmt, va);
655  va_end(va);
656  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
657  }
658  current_time = t;
659  }
660 }
661 
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 {
664  int i;
665  for (i = 0; i < nb_output_streams; i++) {
666  OutputStream *ost2 = output_streams[i];
667  ost2->finished |= ost == ost2 ? this_stream : others;
668  }
669 }
670 
/* Final muxing step for one packet: enforce -frames limits, buffer packets
 * until the muxer header is written, fix up timestamps/durations, and hand
 * the packet to av_interleaved_write_frame(). Always consumes (unrefs) pkt.
 * NOTE(review): the doc extraction elided lines 700, 717-718, 723, 757, 790
 * and 801; gaps are flagged inline — restore them from the repository copy. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
672 {
673  AVFormatContext *s = of->ctx;
674  AVStream *st = ost->st;
675  int ret;
676 
677  /*
678  * Audio encoders may split the packets -- #frames in != #packets out.
679  * But there is no reordering, so we can limit the number of output packets
680  * by simply dropping them here.
681  * Counting encoded video frames needs to be done separately because of
682  * reordering, see do_video_out().
683  * Do not count the packet when unqueued because it has been counted when queued.
684  */
685  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686  if (ost->frame_number >= ost->max_frames) {
687  av_packet_unref(pkt);
688  return;
689  }
690  ost->frame_number++;
691  }
692 
693  if (!of->header_written) {
694  AVPacket tmp_pkt = {0};
695  /* the muxer is not initialized yet, buffer the packet */
696  if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue geometrically, capped at max_muxing_queue_size */
697  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698  ost->max_muxing_queue_size);
699  if (new_size <= av_fifo_size(ost->muxing_queue)) {
/* NOTE(review): line 700 (the av_log(NULL, AV_LOG_ERROR, head) elided */
701  "Too many packets buffered for output stream %d:%d.\n",
702  ost->file_index, ost->st->index);
703  exit_program(1);
704  }
705  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
706  if (ret < 0)
707  exit_program(1);
708  }
709  ret = av_packet_ref(&tmp_pkt, pkt);
710  if (ret < 0)
711  exit_program(1);
712  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713  av_packet_unref(pkt);
714  return;
715  }
716 
/* NOTE(review): lines 717-718 (the condition guarding this timestamp
 * reset — presumably the VSYNC_DROP / audio_sync_method < 0 check) elided */
719  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
720 
721  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
722  int i;
/* NOTE(review): line 723 (declaration of sd via av_packet_get_side_data,
 * presumably AV_PKT_DATA_QUALITY_STATS) elided */
724  NULL);
725  ost->quality = sd ? AV_RL32(sd) : -1;
726  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
727 
728  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
729  if (sd && i < sd[5])
730  ost->error[i] = AV_RL64(sd + 8 + 8*i);
731  else
732  ost->error[i] = -1;
733  }
734 
735  if (ost->frame_rate.num && ost->is_cfr) {
736  if (pkt->duration > 0)
737  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
739  ost->mux_timebase);
740  }
741  }
742 
743  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
744 
745  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746  if (pkt->dts != AV_NOPTS_VALUE &&
747  pkt->pts != AV_NOPTS_VALUE &&
748  pkt->dts > pkt->pts) {
749  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
750  pkt->dts, pkt->pts,
751  ost->file_index, ost->st->index);
/* pts/dts both become the middle value of {pts, dts, last_mux_dts+1} */
752  pkt->pts =
753  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
756  }
/* NOTE(review): line 757 (the opening of the monotonicity check condition,
 * presumably the audio/video codec_type test) elided */
758  pkt->dts != AV_NOPTS_VALUE &&
759  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760  ost->last_mux_dts != AV_NOPTS_VALUE) {
761  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762  if (pkt->dts < max) {
763  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764  av_log(s, loglevel, "Non-monotonous DTS in output stream "
765  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
767  if (exit_on_error) {
768  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
769  exit_program(1);
770  }
771  av_log(s, loglevel, "changing to %"PRId64". This may result "
772  "in incorrect timestamps in the output file.\n",
773  max);
774  if (pkt->pts >= pkt->dts)
775  pkt->pts = FFMAX(pkt->pts, max);
776  pkt->dts = max;
777  }
778  }
779  }
780  ost->last_mux_dts = pkt->dts;
781 
782  ost->data_size += pkt->size;
783  ost->packets_written++;
784 
785  pkt->stream_index = ost->index;
786 
787  if (debug_ts) {
788  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
/* NOTE(review): line 790 (the media-type string argument, presumably
 * av_get_media_type_string(...)) elided */
791  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
793  pkt->size
794  );
795  }
796 
797  ret = av_interleaved_write_frame(s, pkt);
798  if (ret < 0) {
799  print_error("av_interleaved_write_frame()", ret);
800  main_return_code = 1;
/* NOTE(review): line 801 (presumably close_all_output_streams(...)) elided */
802  }
803  av_packet_unref(pkt);
804 }
805 
807 {
808  OutputFile *of = output_files[ost->file_index];
809 
810  ost->finished |= ENCODER_FINISHED;
811  if (of->shortest) {
812  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813  of->recording_time = FFMIN(of->recording_time, end);
814  }
815 }
816 
818 {
819  int ret = 0;
820 
821  /* apply the output bitstream filters, if any */
822  if (ost->nb_bitstream_filters) {
823  int idx;
824 
825  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
826  if (ret < 0)
827  goto finish;
828 
829  idx = 1;
830  while (idx) {
831  /* get a packet from the previous filter up the chain */
832  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
833  if (ret == AVERROR(EAGAIN)) {
834  ret = 0;
835  idx--;
836  continue;
837  } else if (ret < 0)
838  goto finish;
839 
840  /* send it to the next filter down the chain or to the muxer */
841  if (idx < ost->nb_bitstream_filters) {
842  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
843  if (ret < 0)
844  goto finish;
845  idx++;
846  } else
847  write_packet(of, pkt, ost, 0);
848  }
849  } else
850  write_packet(of, pkt, ost, 0);
851 
852 finish:
853  if (ret < 0 && ret != AVERROR_EOF) {
854  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
855  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
856  if(exit_on_error)
857  exit_program(1);
858  }
859 }
860 
862 {
863  OutputFile *of = output_files[ost->file_index];
864 
865  if (of->recording_time != INT64_MAX &&
867  AV_TIME_BASE_Q) >= 0) {
868  close_output_stream(ost);
869  return 0;
870  }
871  return 1;
872 }
873 
/* Encode one audio frame: derive/advance sync_opts from the frame's pts and
 * sample count, feed the frame to the encoder, and mux every packet the
 * encoder produces. Aborts the program on encoder errors.
 * NOTE(review): only original line 895 (apparently blank) was elided here. */
874 static void do_audio_out(OutputFile *of, OutputStream *ost,
875  AVFrame *frame)
876 {
877  AVCodecContext *enc = ost->enc_ctx;
878  AVPacket pkt;
879  int ret;
880 
881  av_init_packet(&pkt);
882  pkt.data = NULL;
883  pkt.size = 0;
884 
885  if (!check_recording_time(ost))
886  return;
887 
/* without a usable pts (or with -async < 0), continue from our own clock */
888  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
889  frame->pts = ost->sync_opts;
890  ost->sync_opts = frame->pts + frame->nb_samples;
891  ost->samples_encoded += frame->nb_samples;
892  ost->frames_encoded++;
893 
894  av_assert0(pkt.size || !pkt.data);
896  if (debug_ts) {
897  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
898  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
899  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
900  enc->time_base.num, enc->time_base.den);
901  }
902 
903  ret = avcodec_send_frame(enc, frame);
904  if (ret < 0)
905  goto error;
906 
/* drain all packets the encoder has ready; EAGAIN means it wants input */
907  while (1) {
908  ret = avcodec_receive_packet(enc, &pkt);
909  if (ret == AVERROR(EAGAIN))
910  break;
911  if (ret < 0)
912  goto error;
913 
914  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
915 
916  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
917 
918  if (debug_ts) {
919  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
920  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
921  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
922  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
923  }
924 
925  output_packet(of, &pkt, ost);
926  }
927 
928  return;
929 error:
930  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
931  exit_program(1);
932 }
933 
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (one packet to draw, one with num_rects=0 to clear); other codecs once.
 * Timestamps are shifted by the output start time so -ss/-t behave.
 * NOTE(review): the doc extraction elided lines 964 (the condition selecting
 * nb = 2, presumably the DVB codec check) and 983 (presumably adjusting
 * end_display_time); restore them from the repository copy. */
934 static void do_subtitle_out(OutputFile *of,
935  OutputStream *ost,
936  AVSubtitle *sub)
937 {
938  int subtitle_out_max_size = 1024 * 1024;
939  int subtitle_out_size, nb, i;
940  AVCodecContext *enc;
941  AVPacket pkt;
942  int64_t pts;
943 
944  if (sub->pts == AV_NOPTS_VALUE) {
945  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
946  if (exit_on_error)
947  exit_program(1);
948  return;
949  }
950 
951  enc = ost->enc_ctx;
952 
/* lazily allocate the (shared) encode output buffer */
953  if (!subtitle_out) {
954  subtitle_out = av_malloc(subtitle_out_max_size);
955  if (!subtitle_out) {
956  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
957  exit_program(1);
958  }
959  }
960 
961  /* Note: DVB subtitle need one packet to draw them and one other
962  packet to clear them */
963  /* XXX: signal it in the codec context ? */
/* NOTE(review): line 964 (the condition for the two-packet case) elided */
965  nb = 2;
966  else
967  nb = 1;
968 
969  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
970  pts = sub->pts;
971  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
972  pts -= output_files[ost->file_index]->start_time;
973  for (i = 0; i < nb; i++) {
974  unsigned save_num_rects = sub->num_rects;
975 
976  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
977  if (!check_recording_time(ost))
978  return;
979 
980  sub->pts = pts;
981  // start_display_time is required to be 0
982  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
/* NOTE(review): line 983 elided — upstream adjusts end_display_time here */
984  sub->start_display_time = 0;
/* second pass (DVB clear packet) encodes with zero rectangles */
985  if (i == 1)
986  sub->num_rects = 0;
987 
988  ost->frames_encoded++;
989 
990  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
991  subtitle_out_max_size, sub);
992  if (i == 1)
993  sub->num_rects = save_num_rects;
994  if (subtitle_out_size < 0) {
995  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
996  exit_program(1);
997  }
998 
999  av_init_packet(&pkt);
1000  pkt.data = subtitle_out;
1001  pkt.size = subtitle_out_size;
1002  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1003  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1004  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1005  /* XXX: the pts correction is handled here. Maybe handling
1006  it in the codec would be better */
1007  if (i == 0)
1008  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1009  else
1010  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1011  }
1012  pkt.dts = pkt.pts;
1013  output_packet(of, &pkt, ost);
1014  }
1015 }
1016 
1017 static void do_video_out(OutputFile *of,
1018  OutputStream *ost,
1019  AVFrame *next_picture,
1020  double sync_ipts)
1021 {
1022  int ret, format_video_sync;
1023  AVPacket pkt;
1024  AVCodecContext *enc = ost->enc_ctx;
1025  AVCodecParameters *mux_par = ost->st->codecpar;
1026  AVRational frame_rate;
1027  int nb_frames, nb0_frames, i;
1028  double delta, delta0;
1029  double duration = 0;
1030  int frame_size = 0;
1031  InputStream *ist = NULL;
1033 
1034  if (ost->source_index >= 0)
1035  ist = input_streams[ost->source_index];
1036 
1037  frame_rate = av_buffersink_get_frame_rate(filter);
1038  if (frame_rate.num > 0 && frame_rate.den > 0)
1039  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1040 
1041  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1042  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1043 
1044  if (!ost->filters_script &&
1045  !ost->filters &&
1046  next_picture &&
1047  ist &&
1048  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1049  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1050  }
1051 
1052  if (!next_picture) {
1053  //end, flushing
1054  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1055  ost->last_nb0_frames[1],
1056  ost->last_nb0_frames[2]);
1057  } else {
1058  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1059  delta = delta0 + duration;
1060 
1061  /* by default, we output a single frame */
1062  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1063  nb_frames = 1;
1064 
1065  format_video_sync = video_sync_method;
1066  if (format_video_sync == VSYNC_AUTO) {
1067  if(!strcmp(of->ctx->oformat->name, "avi")) {
1068  format_video_sync = VSYNC_VFR;
1069  } else
1070  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1071  if ( ist
1072  && format_video_sync == VSYNC_CFR
1073  && input_files[ist->file_index]->ctx->nb_streams == 1
1074  && input_files[ist->file_index]->input_ts_offset == 0) {
1075  format_video_sync = VSYNC_VSCFR;
1076  }
1077  if (format_video_sync == VSYNC_CFR && copy_ts) {
1078  format_video_sync = VSYNC_VSCFR;
1079  }
1080  }
1081  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1082 
1083  if (delta0 < 0 &&
1084  delta > 0 &&
1085  format_video_sync != VSYNC_PASSTHROUGH &&
1086  format_video_sync != VSYNC_DROP) {
1087  if (delta0 < -0.6) {
1088  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1089  } else
1090  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1091  sync_ipts = ost->sync_opts;
1092  duration += delta0;
1093  delta0 = 0;
1094  }
1095 
1096  switch (format_video_sync) {
1097  case VSYNC_VSCFR:
1098  if (ost->frame_number == 0 && delta0 >= 0.5) {
1099  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1100  delta = duration;
1101  delta0 = 0;
1102  ost->sync_opts = lrint(sync_ipts);
1103  }
1104  case VSYNC_CFR:
1105  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1106  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1107  nb_frames = 0;
1108  } else if (delta < -1.1)
1109  nb_frames = 0;
1110  else if (delta > 1.1) {
1111  nb_frames = lrintf(delta);
1112  if (delta0 > 1.1)
1113  nb0_frames = lrintf(delta0 - 0.6);
1114  }
1115  break;
1116  case VSYNC_VFR:
1117  if (delta <= -0.6)
1118  nb_frames = 0;
1119  else if (delta > 0.6)
1120  ost->sync_opts = lrint(sync_ipts);
1121  break;
1122  case VSYNC_DROP:
1123  case VSYNC_PASSTHROUGH:
1124  ost->sync_opts = lrint(sync_ipts);
1125  break;
1126  default:
1127  av_assert0(0);
1128  }
1129  }
1130 
1131  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1132  nb0_frames = FFMIN(nb0_frames, nb_frames);
1133 
1134  memmove(ost->last_nb0_frames + 1,
1135  ost->last_nb0_frames,
1136  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1137  ost->last_nb0_frames[0] = nb0_frames;
1138 
1139  if (nb0_frames == 0 && ost->last_dropped) {
1140  nb_frames_drop++;
1142  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1143  ost->frame_number, ost->st->index, ost->last_frame->pts);
1144  }
1145  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1146  if (nb_frames > dts_error_threshold * 30) {
1147  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1148  nb_frames_drop++;
1149  return;
1150  }
1151  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1152  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1153  if (nb_frames_dup > dup_warning) {
1154  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1155  dup_warning *= 10;
1156  }
1157  }
1158  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1159 
1160  /* duplicates frame if needed */
1161  for (i = 0; i < nb_frames; i++) {
1162  AVFrame *in_picture;
1163  av_init_packet(&pkt);
1164  pkt.data = NULL;
1165  pkt.size = 0;
1166 
1167  if (i < nb0_frames && ost->last_frame) {
1168  in_picture = ost->last_frame;
1169  } else
1170  in_picture = next_picture;
1171 
1172  if (!in_picture)
1173  return;
1174 
1175  in_picture->pts = ost->sync_opts;
1176 
1177 #if 1
1178  if (!check_recording_time(ost))
1179 #else
1180  if (ost->frame_number >= ost->max_frames)
1181 #endif
1182  return;
1183 
1184 #if FF_API_LAVF_FMT_RAWPICTURE
1185  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1186  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1187  /* raw pictures are written as AVPicture structure to
1188  avoid any copies. We support temporarily the older
1189  method. */
1190  if (in_picture->interlaced_frame)
1191  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1192  else
1193  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1194  pkt.data = (uint8_t *)in_picture;
1195  pkt.size = sizeof(AVPicture);
1196  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1197  pkt.flags |= AV_PKT_FLAG_KEY;
1198 
1199  output_packet(of, &pkt, ost);
1200  } else
1201 #endif
1202  {
1203  int forced_keyframe = 0;
1204  double pts_time;
1205 
1207  ost->top_field_first >= 0)
1208  in_picture->top_field_first = !!ost->top_field_first;
1209 
1210  if (in_picture->interlaced_frame) {
1211  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1212  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1213  else
1214  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1215  } else
1216  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1217 
1218  in_picture->quality = enc->global_quality;
1219  in_picture->pict_type = 0;
1220 
1221  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1222  in_picture->pts * av_q2d(enc->time_base) : NAN;
1223  if (ost->forced_kf_index < ost->forced_kf_count &&
1224  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1225  ost->forced_kf_index++;
1226  forced_keyframe = 1;
1227  } else if (ost->forced_keyframes_pexpr) {
1228  double res;
1229  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1232  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1238  res);
1239  if (res) {
1240  forced_keyframe = 1;
1246  }
1247 
1249  } else if ( ost->forced_keyframes
1250  && !strncmp(ost->forced_keyframes, "source", 6)
1251  && in_picture->key_frame==1) {
1252  forced_keyframe = 1;
1253  }
1254 
1255  if (forced_keyframe) {
1256  in_picture->pict_type = AV_PICTURE_TYPE_I;
1257  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1258  }
1259 
1261  if (debug_ts) {
1262  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1263  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1264  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1265  enc->time_base.num, enc->time_base.den);
1266  }
1267 
1268  ost->frames_encoded++;
1269 
1270  ret = avcodec_send_frame(enc, in_picture);
1271  if (ret < 0)
1272  goto error;
1273 
1274  while (1) {
1275  ret = avcodec_receive_packet(enc, &pkt);
1276  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1277  if (ret == AVERROR(EAGAIN))
1278  break;
1279  if (ret < 0)
1280  goto error;
1281 
1282  if (debug_ts) {
1283  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1284  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1285  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1286  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1287  }
1288 
1289  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1290  pkt.pts = ost->sync_opts;
1291 
1292  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1293 
1294  if (debug_ts) {
1295  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1296  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1298  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1299  }
1300 
1301  frame_size = pkt.size;
1302  output_packet(of, &pkt, ost);
1303 
1304  /* if two pass, output log */
1305  if (ost->logfile && enc->stats_out) {
1306  fprintf(ost->logfile, "%s", enc->stats_out);
1307  }
1308  }
1309  }
1310  ost->sync_opts++;
1311  /*
1312  * For video, number of frames in == number of packets out.
1313  * But there may be reordering, so we can't throw away frames on encoder
1314  * flush, we need to limit them here, before they go into encoder.
1315  */
1316  ost->frame_number++;
1317 
1318  if (vstats_filename && frame_size)
1319  do_video_stats(ost, frame_size);
1320  }
1321 
1322  if (!ost->last_frame)
1323  ost->last_frame = av_frame_alloc();
1324  av_frame_unref(ost->last_frame);
1325  if (next_picture && ost->last_frame)
1326  av_frame_ref(ost->last_frame, next_picture);
1327  else
1328  av_frame_free(&ost->last_frame);
1329 
1330  return;
1331 error:
1332  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1333  exit_program(1);
1334 }
1335 
1336 static double psnr(double d)
1337 {
1338  return -10.0 * log10(d);
1339 }
1340 
/* Append one line of per-frame video statistics for 'ost' to the -vstats log.
 * NOTE(review): the function signature is not visible in this excerpt; the
 * body reads 'ost' (output stream) and 'frame_size' (encoded packet size in
 * bytes) — confirm against the full file. */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* nb_frames counts packets already written to the muxer */
        frame_number = ost->st->nb_frames;
        /* vstats_version selects the legacy vs. extended line format */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* PSNR is only reported when the encoder collected error stats */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid dividing by ~0 in avg_bitrate below */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1383 
1384 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1385 
/* Mark output stream(s) as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature and (apparently) the line that marks 'ost'
 * itself as finished are missing from this excerpt — verify against the
 * full file. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    if (of->shortest) {
        /* Propagate the finished state to all sibling streams of this file. */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1398 
1399 /**
1400  * Get and encode new output from any of the filtergraphs, without causing
1401  * activity.
1402  *
1403  * @return 0 for success, <0 for severe errors
1404  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of 'filter' (used below) is not
         * visible in this excerpt — presumably lost in extraction. */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* Nothing to reap until this stream's filtergraph is configured. */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* Lazily initialize the output stream on first filtered output. */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* Reusable per-stream frame buffer for buffersink output. */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NOTE(review): the remaining argument(s) of this call are missing
             * from this excerpt (truncated line). */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log(...) call opening this statement
                     * is missing from this excerpt (truncated line). */
                    "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* On EOF while flushing, push a NULL frame so the video
                     * path can emit any pending duplicated frames.
                     * NOTE(review): a guarding condition appears to be missing
                     * here (truncated line). */
                    do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* Stream already done: discard filtered output. */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* Widen the timebase denominator so float_pts keeps extra
                 * fractional precision relative to the integer pts. */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                           av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                           float_pts,
                           enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): the av_log(...) call opening this statement
                     * is missing from this excerpt (truncated line). */
                    "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1510 
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * verbose per-input/per-output stream statistics. */
static void print_final_stats(int64_t total_size)
{
    /* Byte totals accumulated over all output streams, by media type. */
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0; /* muxing overhead; -1 means "unknown" */
    int i, j;
    int pass1_used = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the condition guarding this assignment is missing from
         * this excerpt (truncated lines) — verify against the full file. */
            pass1_used = 0;
    }

    /* Overhead = container bytes beyond the raw coded payload. */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* Warn when the run produced no coded output at all; with two-pass, the
     * first pass legitimately writes nothing. */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1627 
/* Emit the periodic (and final) status line: frame count, fps, q, size, time,
 * bitrate, dup/drop counts and speed.  Also feeds the machine-readable
 * -progress output via buf_script. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1; /* throttles the report to one per 500ms */
    static int qp_histogram[52];
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        /* rate-limit: at most one report every 500ms */
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    /* wall-clock seconds elapsed since transcoding started */
    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0; /* set once the first video stream has been reported */
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* Additional video streams only contribute their q value. */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* one hex digit (log2 of count) per QP bucket */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* cumulative error over the whole run */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        /* error of the most recent frame only */
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4; /* chroma planes are quarter-sized (4:2:0) */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): the condition guarding this statement is missing from
         * this excerpt (truncated line). */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split pts (in AV_TIME_BASE units) into h:m:s.us for display */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    /* NOTE(review): the guard and the argument line of this snprintf are
     * missing from this excerpt (truncated lines). */
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

        fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            /* NOTE(review): the av_log(...) call opening this statement is
             * missing from this excerpt (truncated line). */
            if ((ret = avio_closep(&progress_avio)) < 0)
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1819 
/* Drain every active encoder at end of input: initialize streams that never
 * received data, then send a NULL (flush) frame and mux all pending packets
 * until AVERROR_EOF. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log(...) call opening this statement is
             * missing from this excerpt (truncated line). */
            "Finishing stream %d:%d without any data written to it.\n",
            ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* Fall back to demuxer-reported parameters for inputs that
                 * never produced a frame, so the graph can be configured. */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0) {
                        AVCodecParameters *par = ifilter->ist->st->codecpar;
                        // We never got any input. Set a fake format, which will
                        // come from libavformat.
                        ifilter->format = par->format;
                        ifilter->sample_rate = par->sample_rate;
                        ifilter->channels = par->channels;
                        ifilter->channel_layout = par->channel_layout;
                        ifilter->width = par->width;
                        ifilter->height = par->height;
                        ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
                    }
                }

                /* NOTE(review): a line is missing before this 'continue'; as
                 * shown, the configure/finish calls below appear unreachable —
                 * verify against the full file. */
                continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* PCM-style audio encoders (frame_size <= 1) buffer nothing. */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

        /* NOTE(review): the condition guarding this 'continue' is missing from
         * this excerpt (truncated line). */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): a line is missing here (truncated). */

            /* Pull packets; when the encoder wants input, feed it the NULL
             * flush frame. */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* two-pass stats, if any */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost);
            /* NOTE(review): the condition (and opening brace) guarding this
             * call is missing from this excerpt (truncated line). */
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1948 
1949 /*
1950  * Check whether a packet from ist should be written into ost at this time
1951  */
/* Per the comment above: decide whether a packet from 'ist' may be written to
 * 'ost' right now.  Returns 1 to write, 0 to skip.
 * NOTE(review): the function signature is missing from this excerpt. */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* Packet must come from the input stream this output is mapped to. */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* Drop packets that precede the requested output start time (-ss). */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1968 
/* Copy one input packet to 'ost' without re-encoding (-c copy): applies start
 * time / recording time trimming, rescales timestamps to the muxer timebase,
 * optionally runs the parser to fix up headers, and muxes the result. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    /* Output start time; all timestamps are shifted so output starts at 0. */
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* Skip leading non-keyframes.
     * NOTE(review): the rest of this condition is missing from this excerpt
     * (truncated line). */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    /* Skip packets before the comparison start point unless copying prior
     * data was requested. */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* Stop copying once the requested output duration (-t) is reached. */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* Same check against the input file's own recording-time limit. */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    /* Rescale timestamps from the input stream timebase to the muxer
     * timebase, shifted by the output start time. */
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the initialization of 'duration' and part of the
         * av_rescale_delta() argument list are missing from this excerpt
         * (truncated lines). */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
       ) {
        /* NOTE(review): the final argument line of av_parser_change() is
         * missing from this excerpt (truncated line). */
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* Parser allocated a new payload: wrap it so it is freed with
             * the packet. */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    output_packet(of, &opkt, ost);
}
2082 
/* Try to obtain a channel layout for an audio input stream whose decoder did
 * not report one; returns 1 when a layout is (already or now) known, 0 when
 * guessing was not possible.
 * NOTE(review): the signature and the line that actually fills in
 * dec->channel_layout are missing from this excerpt (truncated lines). */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* Respect the user cap on how many channels may be guessed for. */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2102 
/* Bookkeep decode results and enforce -xerror: count successes/failures in
 * decode_error_stat and abort the program on error or corrupt output when
 * exit_on_error is set. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures. */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
        /* NOTE(review): an inner condition (with opening brace) is missing
         * from this excerpt (truncated line), which explains the apparent
         * extra closing brace below — verify against the full file. */
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
        exit_program(1);
    }
    }
}
2118 
2119 // Filters can be configured only if the formats of all inputs are known.
2121 {
2122  int i;
2123  for (i = 0; i < fg->nb_inputs; i++) {
2124  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2126  return 0;
2127  }
2128  return 1;
2129 }
2130 
/* Feed one decoded frame into 'ifilter', (re)configuring its filtergraph
 * first when the frame's parameters differ from what the graph was built for;
 * frames arriving before all graph inputs are known are queued.
 * NOTE(review): the function signature is missing from this excerpt. */
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* Refresh the stored input parameters from the new frame. */
    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* Not all inputs known yet: queue a private copy of the
                 * frame until the graph can be configured. */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    /* Double the FIFO before writing when it is full. */
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* Drain frames already produced by the old graph before rebuilding. */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the call that pushes 'frame' into the buffer source and
     * assigns 'ret' is missing from this excerpt (truncated line). */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2206 
/*
 * Mark a filtergraph input as finished. For a configured input the EOF is
 * forwarded into the graph; for a never-configured graph, once every input
 * has reached EOF all the graph's output streams are finished directly.
 * NOTE(review): the extraction dropped line 2214, which presumably sets ret
 * from the call that signals EOF on ifilter->filter — confirm against the
 * original file.
 */
2207 static int ifilter_send_eof(InputFilter *ifilter)
2208 {
2209  int i, j, ret;
2210 
2211  ifilter->eof = 1;
2212 
2213  if (ifilter->filter) {
     /* NOTE(review): missing line here (sets ret, checked below). */
2215  if (ret < 0)
2216  return ret;
2217  } else {
2218  // the filtergraph was never configured
2219  FilterGraph *fg = ifilter->graph;
2220  for (i = 0; i < fg->nb_inputs; i++)
2221  if (!fg->inputs[i]->eof)
2222  break;
2223  if (i == fg->nb_inputs) {
2224  // All the input streams have finished without the filtergraph
2225  // ever being configured.
2226  // Mark the output streams as finished.
2227  for (j = 0; j < fg->nb_outputs; j++)
2228  finish_output_stream(fg->outputs[j]->ost);
2229  }
2230  }
2231 
2232  return 0;
2233 }
2234 
2235 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2236 // There is the following difference: if you got a frame, you must call
2237 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2238 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2239 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2240 {
2241  int ret;
2242 
2243  *got_frame = 0;
2244 
2245  if (pkt) {
2246  ret = avcodec_send_packet(avctx, pkt);
2247  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2248  // decoded frames with avcodec_receive_frame() until done.
2249  if (ret < 0 && ret != AVERROR_EOF)
2250  return ret;
2251  }
2252 
2253  ret = avcodec_receive_frame(avctx, frame);
2254  if (ret < 0 && ret != AVERROR(EAGAIN))
2255  return ret;
2256  if (ret >= 0)
2257  *got_frame = 1;
2258 
2259  return 0;
2260 }
2261 
/*
 * Distribute one decoded frame to every filtergraph input fed by this input
 * stream. All but the last destination get a reference via ist->filter_frame;
 * the last receives the frame itself. Returns 0 or the first error.
 * NOTE(review): the extraction dropped line 2280, presumably the opening of
 * the av_log() call whose format-string continuation is visible below —
 * confirm against the original file.
 */
2262 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2263 {
2264  int i, ret;
2265  AVFrame *f;
2266 
2267  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2268  for (i = 0; i < ist->nb_filters; i++) {
2269  if (i < ist->nb_filters - 1) {
2270  f = ist->filter_frame;
2271  ret = av_frame_ref(f, decoded_frame);
2272  if (ret < 0)
2273  break;
2274  } else
2275  f = decoded_frame;
2276  ret = ifilter_send_frame(ist->filters[i], f);
     /* EOF from a filter input is not an error for the caller. */
2277  if (ret == AVERROR_EOF)
2278  ret = 0; /* ignore */
2279  if (ret < 0) {
     /* NOTE(review): missing av_log() opener here. */
2281  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2282  break;
2283  }
2284  }
2285  return ret;
2286 }
2287 
/*
 * Decode one audio packet (or drain with pkt==NULL), fix up timestamps and
 * forward the decoded frame to the filters. *got_output reports whether a
 * frame was produced; *decode_failed distinguishes decoder errors from
 * post-processing errors for the caller's diagnostics.
 * NOTE(review): the extraction dropped line 2302, presumably the
 * update_benchmark() reset paired with the call after decode() — confirm
 * against the original file.
 */
2288 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2289  int *decode_failed)
2290 {
2291  AVFrame *decoded_frame;
2292  AVCodecContext *avctx = ist->dec_ctx;
2293  int ret, err = 0;
2294  AVRational decoded_frame_tb;
2295 
     /* Lazily allocate the per-stream scratch frames. */
2296  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2297  return AVERROR(ENOMEM);
2298  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2299  return AVERROR(ENOMEM);
2300  decoded_frame = ist->decoded_frame;
2301 
     /* NOTE(review): missing line here (likely benchmark bookkeeping). */
2303  ret = decode(avctx, decoded_frame, got_output, pkt);
2304  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2305  if (ret < 0)
2306  *decode_failed = 1;
2307 
     /* A nonpositive sample rate would divide by zero below. */
2308  if (ret >= 0 && avctx->sample_rate <= 0) {
2309  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2310  ret = AVERROR_INVALIDDATA;
2311  }
2312 
2313  if (ret != AVERROR_EOF)
2314  check_decode_result(ist, got_output, ret);
2315 
2316  if (!*got_output || ret < 0)
2317  return ret;
2318 
2319  ist->samples_decoded += decoded_frame->nb_samples;
2320  ist->frames_decoded++;
2321 
2322 #if 1
2323  /* increment next_dts to use for the case where the input stream does not
2324  have timestamps or there are multiple frames in the packet */
2325  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2326  avctx->sample_rate;
2327  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2328  avctx->sample_rate;
2329 #endif
2330 
     /* Pick a pts and its timebase: frame pts, then packet pts, then the
      * stream's running dts as a last resort. */
2331  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2332  decoded_frame_tb = ist->st->time_base;
2333  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2334  decoded_frame->pts = pkt->pts;
2335  decoded_frame_tb = ist->st->time_base;
2336  }else {
2337  decoded_frame->pts = ist->dts;
2338  decoded_frame_tb = AV_TIME_BASE_Q;
2339  }
2340  if (decoded_frame->pts != AV_NOPTS_VALUE)
2341  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2342  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2343  (AVRational){1, avctx->sample_rate});
2344  ist->nb_samples = decoded_frame->nb_samples;
2345  err = send_frame_to_filters(ist, decoded_frame);
2346 
2347  av_frame_unref(ist->filter_frame);
2348  av_frame_unref(decoded_frame);
2349  return err < 0 ? err : ret;
2350 }
2351 
/*
 * Decode one video packet (or drain with pkt==NULL / eof), reconstruct a
 * best-effort timestamp, retrieve hwaccel data if needed, and forward the
 * frame to the filters. *got_output / *decode_failed as in decode_audio().
 * NOTE(review): the extraction dropped lines 2389 (likely benchmark
 * bookkeeping before decode()), 2401 (an av_log() opener for the warning
 * text below) and 2479 (presumably av_frame_unref(ist->filter_frame) at the
 * fail: label) — confirm against the original file.
 */
2352 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2353  int *decode_failed)
2354 {
2355  AVFrame *decoded_frame;
2356  int i, ret = 0, err = 0;
2357  int64_t best_effort_timestamp;
2358  int64_t dts = AV_NOPTS_VALUE;
2359  AVPacket avpkt;
2360 
2361  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2362  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2363  // skip the packet.
2364  if (!eof && pkt && pkt->size == 0)
2365  return 0;
2366 
2367  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2368  return AVERROR(ENOMEM);
2369  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2370  return AVERROR(ENOMEM);
2371  decoded_frame = ist->decoded_frame;
2372  if (ist->dts != AV_NOPTS_VALUE)
2373  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2374  if (pkt) {
2375  avpkt = *pkt;
2376  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2377  }
2378 
2379  // The old code used to set dts on the drain packet, which does not work
2380  // with the new API anymore.
2381  if (eof) {
     /* Remember the dts so drained frames can still get a timestamp below. */
2382  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2383  if (!new)
2384  return AVERROR(ENOMEM);
2385  ist->dts_buffer = new;
2386  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2387  }
2388 
     /* NOTE(review): missing line here (likely benchmark bookkeeping). */
2390  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2391  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2392  if (ret < 0)
2393  *decode_failed = 1;
2394 
2395  // The following line may be required in some cases where there is no parser
2396  // or the parser does not has_b_frames correctly
2397  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2398  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2399  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2400  } else
     /* NOTE(review): missing av_log() opener here. */
2402  "video_delay is larger in decoder than demuxer %d > %d.\n"
2403  "If you want to help, upload a sample "
2404  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2405  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2406  ist->dec_ctx->has_b_frames,
2407  ist->st->codecpar->video_delay);
2408  }
2409 
2410  if (ret != AVERROR_EOF)
2411  check_decode_result(ist, got_output, ret);
2412 
     /* Debug aid: report decoder-context vs frame parameter mismatches. */
2413  if (*got_output && ret >= 0) {
2414  if (ist->dec_ctx->width != decoded_frame->width ||
2415  ist->dec_ctx->height != decoded_frame->height ||
2416  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2417  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2418  decoded_frame->width,
2419  decoded_frame->height,
2420  decoded_frame->format,
2421  ist->dec_ctx->width,
2422  ist->dec_ctx->height,
2423  ist->dec_ctx->pix_fmt);
2424  }
2425  }
2426 
2427  if (!*got_output || ret < 0)
2428  return ret;
2429 
2430  if(ist->top_field_first>=0)
2431  decoded_frame->top_field_first = ist->top_field_first;
2432 
2433  ist->frames_decoded++;
2434 
     /* Download the frame from hardware memory when a hwaccel produced it. */
2435  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2436  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2437  if (err < 0)
2438  goto fail;
2439  }
2440  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2441 
2442  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2443 
     /* Under a forced input framerate, synthesize CFR timestamps instead. */
2444  if (ist->framerate.num)
2445  best_effort_timestamp = ist->cfr_next_pts++;
2446 
     /* While draining, fall back to the buffered packet dts values. */
2447  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2448  best_effort_timestamp = ist->dts_buffer[0];
2449 
2450  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2451  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2452  ist->nb_dts_buffer--;
2453  }
2454 
2455  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2456  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2457 
2458  if (ts != AV_NOPTS_VALUE)
2459  ist->next_pts = ist->pts = ts;
2460  }
2461 
2462  if (debug_ts) {
2463  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2464  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2465  ist->st->index, av_ts2str(decoded_frame->pts),
2466  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2467  best_effort_timestamp,
2468  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2469  decoded_frame->key_frame, decoded_frame->pict_type,
2470  ist->st->time_base.num, ist->st->time_base.den);
2471  }
2472 
2473  if (ist->st->sample_aspect_ratio.num)
2474  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2475 
2476  err = send_frame_to_filters(ist, decoded_frame);
2477 
2478 fail:
     /* NOTE(review): missing line here (likely unref of ist->filter_frame). */
2480  av_frame_unref(decoded_frame);
2481  return err < 0 ? err : ret;
2482 }
2483 
/*
 * Decode one subtitle packet, optionally fix overlapping durations
 * (-fix_sub_duration), feed sub2video when a video filter consumes the
 * subtitles, and send the subtitle to every matching subtitle encoder.
 * NOTE(review): the extraction dropped lines 2509/2511 (arguments of the
 * duration-reduction av_log() and presumably the assignment shortening the
 * previous subtitle's end_display_time) and 2532 (presumably the
 * av_fifo_realloc2() growing the sub2video queue) — confirm against the
 * original file.
 */
2484 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2485  int *decode_failed)
2486 {
2487  AVSubtitle subtitle;
2488  int free_sub = 1;
2489  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2490  &subtitle, got_output, pkt);
2491 
2492  check_decode_result(NULL, got_output, ret);
2493 
2494  if (ret < 0 || !*got_output) {
2495  *decode_failed = 1;
     /* An empty packet means EOF: flush any pending sub2video state. */
2496  if (!pkt->size)
2497  sub2video_flush(ist);
2498  return ret;
2499  }
2500 
2501  if (ist->fix_sub_duration) {
2502  int end = 1;
2503  if (ist->prev_sub.got_output) {
     /* Clip the previous subtitle so it ends when this one starts (ms). */
2504  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2505  1000, AV_TIME_BASE);
2506  if (end < ist->prev_sub.subtitle.end_display_time) {
2507  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2508  "Subtitle duration reduced from %"PRId32" to %d%s\n",
     /* NOTE(review): missing lines here (log arguments and, presumably,
      * the assignment applying the reduced end_display_time). */
2510  end <= 0 ? ", dropping it" : "");
2512  }
2513  }
     /* Delay output by one subtitle: emit the previous one now. */
2514  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2515  FFSWAP(int, ret, ist->prev_sub.ret);
2516  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2517  if (end <= 0)
2518  goto out;
2519  }
2520 
2521  if (!*got_output)
2522  return ret;
2523 
2524  if (ist->sub2video.frame) {
2525  sub2video_update(ist, &subtitle);
2526  } else if (ist->nb_filters) {
2527  if (!ist->sub2video.sub_queue)
2528  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2529  if (!ist->sub2video.sub_queue)
2530  exit_program(1);
2531  if (!av_fifo_space(ist->sub2video.sub_queue)) {
     /* NOTE(review): missing line here (presumably grows the FIFO). */
2533  if (ret < 0)
2534  exit_program(1);
2535  }
2536  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
     /* Ownership moved into the queue: don't free the subtitle below. */
2537  free_sub = 0;
2538  }
2539 
2540  if (!subtitle.num_rects)
2541  goto out;
2542 
2543  ist->frames_decoded++;
2544 
2545  for (i = 0; i < nb_output_streams; i++) {
2546  OutputStream *ost = output_streams[i];
2547 
2548  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2549  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2550  continue;
2551 
2552  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2553  }
2554 
2555 out:
2556  if (free_sub)
2557  avsubtitle_free(&subtitle);
2558  return ret;
2559 }
2560 
2562 {
2563  int i, ret;
2564  for (i = 0; i < ist->nb_filters; i++) {
2565  ret = ifilter_send_eof(ist->filters[i]);
2566  if (ret < 0)
2567  return ret;
2568  }
2569  return 0;
2570 }
2571 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Main per-packet driver for one input stream: maintain the stream's running
 * pts/dts, loop the decoder (audio/video/subtitle) while it produces output,
 * propagate EOF into the filtergraphs, advance timestamps for streamcopy,
 * and hand the packet to every stream-copy output. Returns 0 once the stream
 * reached EOF, nonzero otherwise.
 * NOTE(review): the extraction dropped lines 2631/2634 and 2725 — in both
 * framerate-based duration computations the "ticks" initialization and/or the
 * framerate numerator divisor line appear to be missing — confirm against
 * the original file.
 */
2573 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2574 {
2575  int ret = 0, i;
2576  int repeating = 0;
2577  int eof_reached = 0;
2578 
2579  AVPacket avpkt;
2580  if (!ist->saw_first_ts) {
     /* Seed dts so B-frame reordering delay is accounted for. */
2581  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2582  ist->pts = 0;
2583  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2584  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2585  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2586  }
2587  ist->saw_first_ts = 1;
2588  }
2589 
2590  if (ist->next_dts == AV_NOPTS_VALUE)
2591  ist->next_dts = ist->dts;
2592  if (ist->next_pts == AV_NOPTS_VALUE)
2593  ist->next_pts = ist->pts;
2594 
2595  if (!pkt) {
2596  /* EOF handling */
2597  av_init_packet(&avpkt);
2598  avpkt.data = NULL;
2599  avpkt.size = 0;
2600  } else {
2601  avpkt = *pkt;
2602  }
2603 
2604  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2605  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2606  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2607  ist->next_pts = ist->pts = ist->dts;
2608  }
2609 
2610  // while we have more to decode or while the decoder did output something on EOF
2611  while (ist->decoding_needed) {
2612  int64_t duration = 0;
2613  int got_output = 0;
2614  int decode_failed = 0;
2615 
2616  ist->pts = ist->next_pts;
2617  ist->dts = ist->next_dts;
2618 
2619  switch (ist->dec_ctx->codec_type) {
2620  case AVMEDIA_TYPE_AUDIO:
2621  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622  &decode_failed);
2623  break;
2624  case AVMEDIA_TYPE_VIDEO:
2625  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2626  &decode_failed);
2627  if (!repeating || !pkt || got_output) {
2628  if (pkt && pkt->duration) {
2629  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
     /* NOTE(review): missing line(s) here ("ticks" initialization and/or
      * the framerate numerator divisor of this expression). */
2632  duration = ((int64_t)AV_TIME_BASE *
2633  ist->dec_ctx->framerate.den * ticks) /
2635  }
2636 
2637  if(ist->dts != AV_NOPTS_VALUE && duration) {
2638  ist->next_dts += duration;
2639  }else
2640  ist->next_dts = AV_NOPTS_VALUE;
2641  }
2642 
2643  if (got_output)
2644  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2645  break;
2646  case AVMEDIA_TYPE_SUBTITLE:
2647  if (repeating)
2648  break;
2649  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2650  if (!pkt && ret >= 0)
2651  ret = AVERROR_EOF;
2652  break;
2653  default:
2654  return -1;
2655  }
2656 
2657  if (ret == AVERROR_EOF) {
2658  eof_reached = 1;
2659  break;
2660  }
2661 
2662  if (ret < 0) {
2663  if (decode_failed) {
2664  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2665  ist->file_index, ist->st->index, av_err2str(ret));
2666  } else {
2667  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2668  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2669  }
2670  if (!decode_failed || exit_on_error)
2671  exit_program(1);
2672  break;
2673  }
2674 
2675  if (got_output)
2676  ist->got_output = 1;
2677 
2678  if (!got_output)
2679  break;
2680 
2681  // During draining, we might get multiple output frames in this loop.
2682  // ffmpeg.c does not drain the filter chain on configuration changes,
2683  // which means if we send multiple frames at once to the filters, and
2684  // one of those frames changes configuration, the buffered frames will
2685  // be lost. This can upset certain FATE tests.
2686  // Decode only 1 frame per call on EOF to appease these FATE tests.
2687  // The ideal solution would be to rewrite decoding to use the new
2688  // decoding API in a better way.
2689  if (!pkt)
2690  break;
2691 
2692  repeating = 1;
2693  }
2694 
2695  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2696  /* except when looping we need to flush but not to send an EOF */
2697  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2698  int ret = send_filter_eof(ist);
2699  if (ret < 0) {
2700  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2701  exit_program(1);
2702  }
2703  }
2704 
2705  /* handle stream copy */
2706  if (!ist->decoding_needed) {
2707  ist->dts = ist->next_dts;
2708  switch (ist->dec_ctx->codec_type) {
2709  case AVMEDIA_TYPE_AUDIO:
2710  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2711  ist->dec_ctx->sample_rate;
2712  break;
2713  case AVMEDIA_TYPE_VIDEO:
2714  if (ist->framerate.num) {
2715  // TODO: Remove work-around for c99-to-c89 issue 7
2716  AVRational time_base_q = AV_TIME_BASE_Q;
2717  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2718  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2719  } else if (pkt->duration) {
2720  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721  } else if(ist->dec_ctx->framerate.num != 0) {
2722  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2723  ist->next_dts += ((int64_t)AV_TIME_BASE *
2724  ist->dec_ctx->framerate.den * ticks) /
     /* NOTE(review): missing line here (presumably the framerate
      * numerator divisor of this expression). */
2726  }
2727  break;
2728  }
2729  ist->pts = ist->dts;
2730  ist->next_pts = ist->next_dts;
2731  }
2732  for (i = 0; pkt && i < nb_output_streams; i++) {
2733  OutputStream *ost = output_streams[i];
2734 
2735  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2736  continue;
2737 
2738  do_streamcopy(ist, ost, pkt);
2739  }
2740 
2741  return !eof_reached;
2742 }
2743 
/*
 * Build and emit the SDP description for all RTP outputs, either to stdout
 * or to the file named by -sdp_file. Only runs once every output file has
 * written its header.
 * NOTE(review): the extraction dropped line 2781 inside the sdp-file branch
 * (presumably cleanup after a successful write) — confirm against the
 * original file.
 */
2744 static void print_sdp(void)
2745 {
2746  char sdp[16384];
2747  int i;
2748  int j;
2749  AVIOContext *sdp_pb;
2750  AVFormatContext **avc;
2751 
     /* Wait until every muxer has written its header. */
2752  for (i = 0; i < nb_output_files; i++) {
2753  if (!output_files[i]->header_written)
2754  return;
2755  }
2756 
2757  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2758  if (!avc)
2759  exit_program(1);
     /* Collect only the RTP muxer contexts. */
2760  for (i = 0, j = 0; i < nb_output_files; i++) {
2761  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2762  avc[j] = output_files[i]->ctx;
2763  j++;
2764  }
2765  }
2766 
2767  if (!j)
2768  goto fail;
2769 
2770  av_sdp_create(avc, j, sdp, sizeof(sdp));
2771 
2772  if (!sdp_filename) {
2773  printf("SDP:\n%s\n", sdp);
2774  fflush(stdout);
2775  } else {
2776  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2777  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2778  } else {
2779  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2780  avio_closep(&sdp_pb);
     /* NOTE(review): missing line here (likely post-write cleanup). */
2782  }
2783  }
2784 
2785 fail:
2786  av_freep(&avc);
2787 }
2788 
2790 {
2791  int i;
2792  for (i = 0; hwaccels[i].name; i++)
2793  if (hwaccels[i].pix_fmt == pix_fmt)
2794  return &hwaccels[i];
2795  return NULL;
2796 }
2797 
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hardware format whose hwaccel can be
 * initialized (honoring the user's -hwaccel selection); fall back to the
 * first non-hwaccel format otherwise.
 * NOTE(review): the extraction dropped line 2798 (the signature, presumably
 * "static enum AVPixelFormat get_format(AVCodecContext *s, const enum
 * AVPixelFormat *pix_fmts)"), 2805 (the pixel-format descriptor lookup used
 * as "desc" below), 2820 (an av_log() opener) and 2830 (presumably the
 * assignment of s->hw_frames_ctx checked right after) — confirm against the
 * original file.
 */
2799 {
2800  InputStream *ist = s->opaque;
2801  const enum AVPixelFormat *p;
2802  int ret;
2803 
2804  for (p = pix_fmts; *p != -1; p++) {
     /* NOTE(review): missing line here (descriptor lookup for *p). */
2806  const HWAccel *hwaccel;
2807 
     /* First software format terminates the scan: it is the fallback. */
2808  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2809  break;
2810 
2811  hwaccel = get_hwaccel(*p);
2812  if (!hwaccel ||
2813  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2814  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2815  continue;
2816 
2817  ret = hwaccel->init(s);
2818  if (ret < 0) {
     /* Explicitly requested hwaccel failing is fatal for this stream. */
2819  if (ist->hwaccel_id == hwaccel->id) {
     /* NOTE(review): missing av_log() opener here. */
2821  "%s hwaccel requested for input stream #%d:%d, "
2822  "but cannot be initialized.\n", hwaccel->name,
2823  ist->file_index, ist->st->index);
2824  return AV_PIX_FMT_NONE;
2825  }
2826  continue;
2827  }
2828 
2829  if (ist->hw_frames_ctx) {
     /* NOTE(review): missing line here (presumably sets s->hw_frames_ctx). */
2831  if (!s->hw_frames_ctx)
2832  return AV_PIX_FMT_NONE;
2833  }
2834 
2835  ist->active_hwaccel_id = hwaccel->id;
2836  ist->hwaccel_pix_fmt = *p;
2837  break;
2838  }
2839 
2840  return *p;
2841 }
2842 
2844 {
2845  InputStream *ist = s->opaque;
2846 
2847  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2848  return ist->hwaccel_get_buffer(s, frame, flags);
2849 
2850  return avcodec_default_get_buffer2(s, frame, flags);
2851 }
2852 
/*
 * Open the decoder for one input stream (when decoding is needed), wiring up
 * the get_format/get_buffer2 callbacks and per-codec decoder options.
 * On failure a human-readable message is written into error/error_len.
 * NOTE(review): the extraction dropped lines 2875 (presumably a condition
 * guarding the DVB-subtitle warning below), 2883 (presumably the statement
 * implementing the comment above it, e.g. setting the packet timebase) and
 * 2897 (presumably validation of leftover decoder options) — confirm against
 * the original file.
 */
2853 static int init_input_stream(int ist_index, char *error, int error_len)
2854 {
2855  int ret;
2856  InputStream *ist = input_streams[ist_index];
2857 
2858  if (ist->decoding_needed) {
2859  AVCodec *codec = ist->dec;
2860  if (!codec) {
2861  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2862  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2863  return AVERROR(EINVAL);
2864  }
2865 
     /* Route decoder callbacks back to this InputStream. */
2866  ist->dec_ctx->opaque = ist;
2867  ist->dec_ctx->get_format = get_format;
2868  ist->dec_ctx->get_buffer2 = get_buffer;
2869  ist->dec_ctx->thread_safe_callbacks = 1;
2870 
2871  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2872  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2873  (ist->decoding_needed & DECODING_FOR_OST)) {
2874  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
     /* NOTE(review): missing line here (likely a guard for this warning). */
2876  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2877  }
2878 
2879  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2880 
2881  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2882  * audio, and video decoders such as cuvid or mediacodec */
     /* NOTE(review): missing line here (the statement the comment above
      * refers to, presumably setting the decoder packet timebase). */
2884 
2885  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2886  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2887  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2888  if (ret == AVERROR_EXPERIMENTAL)
2889  abort_codec_experimental(codec, 0);
2890 
2891  snprintf(error, error_len,
2892  "Error while opening decoder for input stream "
2893  "#%d:%d : %s",
2894  ist->file_index, ist->st->index, av_err2str(ret));
2895  return ret;
2896  }
     /* NOTE(review): missing line here (likely leftover-option validation). */
2898  }
2899 
2900  ist->next_pts = AV_NOPTS_VALUE;
2901  ist->next_dts = AV_NOPTS_VALUE;
2902 
2903  return 0;
2904 }
2905 
2907 {
2908  if (ost->source_index >= 0)
2909  return input_streams[ost->source_index];
2910  return NULL;
2911 }
2912 
/* qsort()-style comparator for int64_t values: returns -1, 0 or 1 depending
 * on whether *a is less than, equal to or greater than *b. Written as a
 * branch-free sign computation to avoid overflow from subtracting. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
2917 
2918 /* open the muxer when all the streams are initialized */
/* open the muxer when all the streams are initialized */
/*
 * Once every stream of an output file is initialized, write the muxer
 * header, print the SDP if requested, and flush the packets queued while
 * the muxer was not yet open. Returns 0 or a negative AVERROR.
 * NOTE(review): the extraction dropped line 2933, presumably the opener of
 * the av_log() whose message continues below — confirm against the original
 * file.
 */
2919 static int check_init_output_file(OutputFile *of, int file_index)
2920 {
2921  int ret, i;
2922 
     /* Bail out until every stream of this file is ready. */
2923  for (i = 0; i < of->ctx->nb_streams; i++) {
2924  OutputStream *ost = output_streams[of->ost_index + i];
2925  if (!ost->initialized)
2926  return 0;
2927  }
2928 
2929  of->ctx->interrupt_callback = int_cb;
2930 
2931  ret = avformat_write_header(of->ctx, &of->opts);
2932  if (ret < 0) {
     /* NOTE(review): missing av_log() opener here. */
2934  "Could not write header for output file #%d "
2935  "(incorrect codec parameters ?): %s\n",
2936  file_index, av_err2str(ret));
2937  return ret;
2938  }
2939  //assert_avoptions(of->opts);
2940  of->header_written = 1;
2941 
2942  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2943 
2944  if (sdp_filename || want_sdp)
2945  print_sdp();
2946 
2947  /* flush the muxing queues */
2948  for (i = 0; i < of->ctx->nb_streams; i++) {
2949  OutputStream *ost = output_streams[of->ost_index + i];
2950 
2951  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2952  if (!av_fifo_size(ost->muxing_queue))
2953  ost->mux_timebase = ost->st->time_base;
2954 
2955  while (av_fifo_size(ost->muxing_queue)) {
2956  AVPacket pkt;
2957  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2958  write_packet(of, &pkt, ost, 1);
2959  }
2960  }
2961 
2962  return 0;
2963 }
2964 
2966 {
2967  AVBSFContext *ctx;
2968  int i, ret;
2969 
2970  if (!ost->nb_bitstream_filters)
2971  return 0;
2972 
2973  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2974  ctx = ost->bsf_ctx[i];
2975 
2976  ret = avcodec_parameters_copy(ctx->par_in,
2977  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2978  if (ret < 0)
2979  return ret;
2980 
2981  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2982 
2983  ret = av_bsf_init(ctx);
2984  if (ret < 0) {
2985  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2986  ost->bsf_ctx[i]->filter->name);
2987  return ret;
2988  }
2989  }
2990 
2991  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2992  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2993  if (ret < 0)
2994  return ret;
2995 
2996  ost->st->time_base = ctx->time_base_out;
2997 
2998  return 0;
2999 }
3000 
/*
 * Set up an output stream in stream-copy mode: copy codec parameters from
 * the input, pick a codec tag valid for the output container, and carry over
 * framerate, timebase, duration, disposition, side data and per-codec-type
 * fields (block_align, sample aspect ratio, ...).
 * NOTE(review): the extraction dropped line 3001 (the signature, presumably
 * "static int init_output_stream_streamcopy(OutputStream *ost)"), 3017 (an
 * av_log() opener), 3021 (presumably the statement filling ost->ref_par,
 * used as par_src below), 3041 (presumably the call whose result is checked
 * right below), 3047 (presumably the timebase assignment implementing the
 * comment above it) and 3069 (presumably the side-data allocation defining
 * "sd") — confirm against the original file.
 */
3002 {
3003  OutputFile *of = output_files[ost->file_index];
3004  InputStream *ist = get_input_stream(ost);
3005  AVCodecParameters *par_dst = ost->st->codecpar;
3006  AVCodecParameters *par_src = ost->ref_par;
3007  AVRational sar;
3008  int i, ret;
3009  uint32_t codec_tag = par_dst->codec_tag;
3010 
3011  av_assert0(ist && !ost->filter);
3012 
3013  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3014  if (ret >= 0)
3015  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3016  if (ret < 0) {
     /* NOTE(review): missing av_log() opener here. */
3018  "Error setting up codec context options.\n");
3019  return ret;
3020  }
     /* NOTE(review): missing line here (presumably populates ost->ref_par). */
3022 
     /* Keep the source tag only when it is representable in the target
      * container. */
3023  if (!codec_tag) {
3024  unsigned int codec_tag_tmp;
3025  if (!of->ctx->oformat->codec_tag ||
3026  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3027  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3028  codec_tag = par_src->codec_tag;
3029  }
3030 
3031  ret = avcodec_parameters_copy(par_dst, par_src);
3032  if (ret < 0)
3033  return ret;
3034 
3035  par_dst->codec_tag = codec_tag;
3036 
3037  if (!ost->frame_rate.num)
3038  ost->frame_rate = ist->framerate;
3039  ost->st->avg_frame_rate = ost->frame_rate;
3040 
     /* NOTE(review): missing line here (a call whose result is checked). */
3042  if (ret < 0)
3043  return ret;
3044 
3045  // copy timebase while removing common factors
3046  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
     /* NOTE(review): missing line here (the timebase assignment). */
3048 
3049  // copy estimated duration as a hint to the muxer
3050  if (ost->st->duration <= 0 && ist->st->duration > 0)
3051  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3052 
3053  // copy disposition
3054  ost->st->disposition = ist->st->disposition;
3055 
3056  if (ist->st->nb_side_data) {
3057  for (i = 0; i < ist->st->nb_side_data; i++) {
3058  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3059  uint8_t *dst_data;
3060 
3061  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3062  if (!dst_data)
3063  return AVERROR(ENOMEM);
3064  memcpy(dst_data, sd_src->data, sd_src->size);
3065  }
3066  }
3067 
3068  if (ost->rotate_overridden) {
     /* NOTE(review): missing line here (presumably allocates the display-
      * matrix side data "sd" used below). */
3070  sizeof(int32_t) * 9);
3071  if (sd)
3072  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3073  }
3074 
3075  ost->parser = av_parser_init(par_dst->codec_id);
3076  ost->parser_avctx = avcodec_alloc_context3(NULL);
3077  if (!ost->parser_avctx)
3078  return AVERROR(ENOMEM);
3079 
3080  switch (par_dst->codec_type) {
3081  case AVMEDIA_TYPE_AUDIO:
3082  if (audio_volume != 256) {
3083  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3084  exit_program(1);
3085  }
3086  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3087  par_dst->block_align= 0;
3088  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3089  par_dst->block_align= 0;
3090  break;
3091  case AVMEDIA_TYPE_VIDEO:
3092  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3093  sar =
3094  av_mul_q(ost->frame_aspect_ratio,
3095  (AVRational){ par_dst->height, par_dst->width });
3096  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3097  "with stream copy may produce invalid files\n");
3098  }
3099  else if (ist->st->sample_aspect_ratio.num)
3100  sar = ist->st->sample_aspect_ratio;
3101  else
3102  sar = par_src->sample_aspect_ratio;
3103  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3104  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3105  ost->st->r_frame_rate = ist->st->r_frame_rate;
3106  break;
3107  }
3108 
3109  ost->mux_timebase = ist->st->time_base;
3110 
3111  return 0;
3112 }
3113 
/*
 * Store an "encoder" metadata tag on the output stream identifying the
 * encoder in use; the full libavcodec ident is used unless bitexact output
 * was requested via format or codec flags, in which case only "Lavc" plus
 * the encoder name is written.
 * NOTE(review): the extraction dropped line 3114 (the signature, presumably
 * "static void set_encoder_id(OutputFile *of, OutputStream *ost)") and line
 * 3152 (the final argument/flags of the av_dict_set() call below) — confirm
 * against the original file.
 */
3115 {
3116  AVDictionaryEntry *e;
3117 
3118  uint8_t *encoder_string;
3119  int encoder_string_len;
3120  int format_flags = 0;
3121  int codec_flags = 0;
3122 
     /* Respect a user-provided encoder tag. */
3123  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3124  return;
3125 
     /* Evaluate pending fflags/flags option strings to detect bitexact mode. */
3126  e = av_dict_get(of->opts, "fflags", NULL, 0);
3127  if (e) {
3128  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3129  if (!o)
3130  return;
3131  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3132  }
3133  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3134  if (e) {
3135  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3136  if (!o)
3137  return;
3138  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3139  }
3140 
3141  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3142  encoder_string = av_mallocz(encoder_string_len);
3143  if (!encoder_string)
3144  exit_program(1);
3145 
3146  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3147  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3148  else
3149  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3150  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3151  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
     /* NOTE(review): missing line here (final av_dict_set() argument). */
3153 }
3154 
/*
 * Parse the -force_key_frames specification: a comma-separated list of
 * timestamps and/or "chapters[+offset]" entries. Fills ost->forced_kf_pts
 * (sorted, in avctx->time_base units) and ost->forced_kf_count; exits the
 * program on allocation failure.
 * NOTE(review): the extraction dropped line 3187, presumably the opener of
 * the fatal av_log() whose message continues below — confirm against the
 * original file.
 */
3155 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3156  AVCodecContext *avctx)
3157 {
3158  char *p;
3159  int n = 1, i, size, index = 0;
3160  int64_t t, *pts;
3161 
     /* Count entries: one more than the number of commas. */
3162  for (p = kf; *p; p++)
3163  if (*p == ',')
3164  n++;
3165  size = n;
3166  pts = av_malloc_array(size, sizeof(*pts));
3167  if (!pts) {
3168  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3169  exit_program(1);
3170  }
3171 
3172  p = kf;
3173  for (i = 0; i < n; i++) {
3174  char *next = strchr(p, ',');
3175 
3176  if (next)
3177  *next++ = 0;
3178 
3179  if (!memcmp(p, "chapters", 8)) {
3180 
     /* Expand the array to hold one timestamp per chapter. */
3181  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3182  int j;
3183 
3184  if (avf->nb_chapters > INT_MAX - size ||
3185  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3186  sizeof(*pts)))) {
     /* NOTE(review): missing av_log() opener here. */
3188  "Could not allocate forced key frames array.\n");
3189  exit_program(1);
3190  }
     /* Optional "+offset" after "chapters" shifts every chapter start. */
3191  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3192  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3193 
3194  for (j = 0; j < avf->nb_chapters; j++) {
3195  AVChapter *c = avf->chapters[j];
3196  av_assert1(index < size);
3197  pts[index++] = av_rescale_q(c->start, c->time_base,
3198  avctx->time_base) + t;
3199  }
3200 
3201  } else {
3202 
3203  t = parse_time_or_die("force_key_frames", p, 1);
3204  av_assert1(index < size);
3205  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3206 
3207  }
3208 
3209  p = next;
3210  }
3211 
3212  av_assert0(index == size);
3213  qsort(pts, size, sizeof(*pts), compare_int64);
3214  ost->forced_kf_count = size;
3215  ost->forced_kf_pts = pts;
3216 }
3217 
3218 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3219 {
3220  InputStream *ist = get_input_stream(ost);
3221  AVCodecContext *enc_ctx = ost->enc_ctx;
3222  AVFormatContext *oc;
3223 
3224  if (ost->enc_timebase.num > 0) {
3225  enc_ctx->time_base = ost->enc_timebase;
3226  return;
3227  }
3228 
3229  if (ost->enc_timebase.num < 0) {
3230  if (ist) {
3231  enc_ctx->time_base = ist->st->time_base;
3232  return;
3233  }
3234 
3235  oc = output_files[ost->file_index]->ctx;
3236  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3237  }
3238 
3239  enc_ctx->time_base = default_time_base;
3240 }
3241 
/* init_output_stream_encode(): configure the encoder context of an output
 * stream before avcodec_open2() — frame rate selection, per-media-type
 * timebase/geometry/format setup, forced-keyframe parsing, and the stream's
 * mux timebase. Returns 0 on success or a negative error code.
 * NOTE(review): the signature line (original 3242) and a number of statements
 * (e.g. 3246 'AVCodecContext *dec_ctx = NULL;', parts of the audio/video
 * cases) are missing from this scrape; the surviving lines are kept verbatim. */
3243 {
3244  InputStream *ist = get_input_stream(ost);
3245  AVCodecContext *enc_ctx = ost->enc_ctx;
3247  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3248  int j, ret;
3249 
3250  set_encoder_id(output_files[ost->file_index], ost);
3251 
3252  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3253  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3254  // which have to be filtered out to prevent leaking them to output files.
3255  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3256 
3257  if (ist) {
/* inherit disposition and chroma location from the decoded input */
3258  ost->st->disposition = ist->st->disposition;
3259 
3260  dec_ctx = ist->dec_ctx;
3261 
3262  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3263  } else {
/* no input stream: check whether this is the only stream of its type in the
 * output file (the consequence branch at original lines 3271-3272 is missing
 * from the scrape) */
3264  for (j = 0; j < oc->nb_streams; j++) {
3265  AVStream *st = oc->streams[j];
3266  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3267  break;
3268  }
3269  if (j == oc->nb_streams)
3270  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3273  }
3274 
3275  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* pick the output frame rate: user option, then input framerate, then input
 * r_frame_rate, then a 25 fps fallback with a warning
 * (the av_log() head at original line 3284 is missing) */
3276  if (!ost->frame_rate.num)
3278  if (ist && !ost->frame_rate.num)
3279  ost->frame_rate = ist->framerate;
3280  if (ist && !ost->frame_rate.num)
3281  ost->frame_rate = ist->st->r_frame_rate;
3282  if (ist && !ost->frame_rate.num) {
3283  ost->frame_rate = (AVRational){25, 1};
3285  "No information "
3286  "about the input framerate is available. Falling "
3287  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3288  "if you want a different framerate.\n",
3289  ost->file_index, ost->index);
3290  }
3291 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the nearest rate the encoder supports, unless -force_fps */
3292  if (ost->enc->supported_framerates && !ost->force_fps) {
3293  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3294  ost->frame_rate = ost->enc->supported_framerates[idx];
3295  }
3296  // reduce frame rate for mpeg4 to be within the spec limits
3297  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3298  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3299  ost->frame_rate.num, ost->frame_rate.den, 65535);
3300  }
3301  }
3302 
3303  switch (enc_ctx->codec_type) {
3304  case AVMEDIA_TYPE_AUDIO:
/* (sample format / rate / channel layout assignments at original lines
 * 3305, 3309-3311 are missing from the scrape) */
3306  if (dec_ctx)
3307  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3308  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3312 
3313  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3314  break;
3315 
3316  case AVMEDIA_TYPE_VIDEO:
/* (timebase fallback and high-fps warning condition at original lines 3317,
 * 3320, 3322 are missing from the scrape) */
3318 
3319  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3321  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3323  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3324  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3325  }
/* rescale previously parsed forced keyframe times into the encoder timebase */
3326  for (j = 0; j < ost->forced_kf_count; j++)
3327  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3329  enc_ctx->time_base);
3330 
/* geometry and aspect come from the filter graph sink */
3331  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3332  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3333  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3334  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3335  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
/* warn about implicit pixel format choices for libx264 / mpeg2video
 * (parts of these conditions and av_log calls are missing from the scrape) */
3337  if (!strncmp(ost->enc->name, "libx264", 7) &&
3338  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3341  "No pixel format specified, %s for H.264 encoding chosen.\n"
3342  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3344  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3345  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3348  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3349  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3351  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3352  if (dec_ctx)
3353  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3354  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3355 
3356  enc_ctx->framerate = ost->frame_rate;
3357 
3358  ost->st->avg_frame_rate = ost->frame_rate;
3359 
/* (body of this "full transcoding needed" branch, original line 3364, is
 * missing from the scrape) */
3360  if (!dec_ctx ||
3361  enc_ctx->width != dec_ctx->width ||
3362  enc_ctx->height != dec_ctx->height ||
3363  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3365  }
3366 
3367  if (ost->forced_keyframes) {
/* "expr:" prefix: compile an expression evaluated per frame (the
 * av_expr_parse call at original lines 3369-3370 is missing) */
3368  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3371  if (ret < 0) {
3373  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3374  return ret;
3375  }
3380 
3381  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3382  // parse it only for static kf timings
3383  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3385  }
3386  }
3387  break;
3388  case AVMEDIA_TYPE_SUBTITLE:
3389  enc_ctx->time_base = AV_TIME_BASE_Q;
/* subtitle canvas defaults to the input video dimensions */
3390  if (!enc_ctx->width) {
3391  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3392  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3393  }
3394  break;
3395  case AVMEDIA_TYPE_DATA:
3396  break;
3397  default:
3398  abort();
3399  break;
3400  }
3401 
3402  ost->mux_timebase = enc_ctx->time_base;
3403 
3404  return 0;
3405 }
3406 
/* Fully initialize one output stream: run the encoder setup path (or the
 * streamcopy path), apply user dispositions, set up bitstream filters, and
 * kick off writing the output file header once all its streams are ready.
 * On encoder-open failure a human-readable message is written into
 * 'error' (error_len bytes). Returns 0 on success, negative AVERROR on error.
 * NOTE(review): several lines are missing from this scrape (e.g. 3424
 * subtitle_header allocation, 3439-3441 hw_frames_ctx ref, 3457/3459
 * frame-size propagation, 3464/3466 parameters_from_context); surviving
 * lines are kept verbatim. */
3407 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3408 {
3409  int ret = 0;
3410 
3411  if (ost->encoding_needed) {
3412  AVCodec *codec = ost->enc;
3413  AVCodecContext *dec = NULL;
3414  InputStream *ist;
3415 
3416  ret = init_output_stream_encode(ost);
3417  if (ret < 0)
3418  return ret;
3419 
3420  if ((ist = get_input_stream(ost)))
3421  dec = ist->dec_ctx;
/* propagate the decoder's subtitle header (ASS styles) to the encoder */
3422  if (dec && dec->subtitle_header) {
3423  /* ASS code assumes this buffer is null terminated so add extra byte. */
3425  if (!ost->enc_ctx->subtitle_header)
3426  return AVERROR(ENOMEM);
3427  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3429  }
3430  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3431  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* default audio bitrate of 128k when the codec has no own defaults and the
 * user specified none */
3432  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3433  !codec->defaults &&
3434  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3435  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3436  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3437 
3438  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3442  if (!ost->enc_ctx->hw_frames_ctx)
3443  return AVERROR(ENOMEM);
3444  }
3445 
3446  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3447  if (ret == AVERROR_EXPERIMENTAL)
3448  abort_codec_experimental(codec, 1);
3449  snprintf(error, error_len,
3450  "Error while opening encoder for output stream #%d:%d - "
3451  "maybe incorrect parameters such as bit_rate, rate, width or height",
3452  ost->file_index, ost->index);
3453  return ret;
3454  }
3455  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3456  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3458  ost->enc_ctx->frame_size);
/* common user mistake: bitrate given in kbit/s instead of bit/s */
3460  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3461  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3462  " It takes bits/s as argument, not kbits/s\n");
3463 
3465  if (ret < 0) {
3467  "Error initializing the output stream codec context.\n");
3468  exit_program(1);
3469  }
3470  /*
3471  * FIXME: ost->st->codec should't be needed here anymore.
3472  */
3473  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3474  if (ret < 0)
3475  return ret;
3476 
/* copy encoder-produced coded side data onto the output stream */
3477  if (ost->enc_ctx->nb_coded_side_data) {
3478  int i;
3479 
3480  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3481  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3482  uint8_t *dst_data;
3483 
3484  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3485  if (!dst_data)
3486  return AVERROR(ENOMEM);
3487  memcpy(dst_data, sd_src->data, sd_src->size);
3488  }
3489  }
3490 
3491  /*
3492  * Add global input side data. For now this is naive, and copies it
3493  * from the input stream's global side data. All side data should
3494  * really be funneled over AVFrame and libavfilter, then added back to
3495  * packet side data, and then potentially using the first packet for
3496  * global side data.
3497  */
3498  if (ist) {
3499  int i;
3500  for (i = 0; i < ist->st->nb_side_data; i++) {
3501  AVPacketSideData *sd = &ist->st->side_data[i];
3502  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3503  if (!dst)
3504  return AVERROR(ENOMEM);
3505  memcpy(dst, sd->data, sd->size);
/* autorotation is applied by the filter chain, so neutralize the matrix */
3506  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3507  av_display_rotation_set((uint32_t *)dst, 0);
3508  }
3509  }
3510 
3511  // copy timebase while removing common factors
3512  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3513  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3514 
3515  // copy estimated duration as a hint to the muxer
3516  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3517  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3518 
3519  ost->st->codec->codec= ost->enc_ctx->codec;
3520  } else if (ost->stream_copy) {
3521  ret = init_output_stream_streamcopy(ost);
3522  if (ret < 0)
3523  return ret;
3524 
3525  /*
3526  * FIXME: will the codec context used by the parser during streamcopy
3527  * This should go away with the new parser API.
3528  */
3529  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3530  if (ret < 0)
3531  return ret;
3532  }
3533 
3534  // parse user provided disposition, and update stream values
3535  if (ost->disposition) {
/* local option table so av_opt_eval_flags can parse "+default+forced"-style
 * disposition strings */
3536  static const AVOption opts[] = {
3537  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3538  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3539  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3540  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3541  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3542  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3543  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3544  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3545  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3546  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3547  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3548  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3549  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3550  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3551  { NULL },
3552  };
3553  static const AVClass class = {
3554  .class_name = "",
3555  .item_name = av_default_item_name,
3556  .option = opts,
3557  .version = LIBAVUTIL_VERSION_INT,
3558  };
3559  const AVClass *pclass = &class;
3560 
3561  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3562  if (ret < 0)
3563  return ret;
3564  }
3565 
3566  /* initialize bitstream filters for the output stream
3567  * needs to be done here, because the codec id for streamcopy is not
3568  * known until now */
3569  ret = init_output_bsfs(ost);
3570  if (ret < 0)
3571  return ret;
3572 
3573  ost->initialized = 1;
3574 
3575  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3576  if (ret < 0)
3577  return ret;
3578 
3579  return ret;
3580 }
3581 
/* Warn once per stream index when a packet arrives for a stream that appeared
 * after the input was opened (such packets are ignored by process_input).
 * NOTE(review): one argument line of this av_log call (original line 3591,
 * the media-type string) is missing from the scrape. */
3582 static void report_new_stream(int input_index, AVPacket *pkt)
3583 {
3584  InputFile *file = input_files[input_index];
3585  AVStream *st = file->ctx->streams[pkt->stream_index];
3586 
/* already warned about this stream index (or a higher one)? */
3587  if (pkt->stream_index < file->nb_streams_warn)
3588  return;
3589  av_log(file->ctx, AV_LOG_WARNING,
3590  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3592  input_index, pkt->stream_index,
3593  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index we have warned about */
3594  file->nb_streams_warn = pkt->stream_index + 1;
3595 }
3596 
/* One-time setup before the transcode loop: resolve filtergraph output
 * sources, initialize rate emulation, open decoders and (non-filtered)
 * encoders, discard unused programs, write headers for stream-less outputs,
 * then print the stream mapping. Returns 0 on success, negative on error
 * (after printing the accumulated error message).
 * NOTE(review): original line 3764 (between the error check and 'return 0')
 * is missing from the scrape. */
3597 static int transcode_init(void)
3598 {
3599  int ret = 0, i, j, k;
3600  AVFormatContext *oc;
3601  OutputStream *ost;
3602  InputStream *ist;
3603  char error[1024] = {0};
3604 
/* for single-input filtergraph outputs without an explicit source, point
 * source_index at the input stream that feeds the graph */
3605  for (i = 0; i < nb_filtergraphs; i++) {
3606  FilterGraph *fg = filtergraphs[i];
3607  for (j = 0; j < fg->nb_outputs; j++) {
3608  OutputFilter *ofilter = fg->outputs[j];
3609  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3610  continue;
3611  if (fg->nb_inputs != 1)
3612  continue;
3613  for (k = nb_input_streams-1; k >= 0 ; k--)
3614  if (fg->inputs[0]->ist == input_streams[k])
3615  break;
3616  ofilter->ost->source_index = k;
3617  }
3618  }
3619 
3620  /* init framerate emulation */
3621  for (i = 0; i < nb_input_files; i++) {
3622  InputFile *ifile = input_files[i];
3623  if (ifile->rate_emu)
3624  for (j = 0; j < ifile->nb_streams; j++)
3625  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3626  }
3627 
3628  /* init input streams */
3629  for (i = 0; i < nb_input_streams; i++)
3630  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts before reporting */
3631  for (i = 0; i < nb_output_streams; i++) {
3632  ost = output_streams[i];
3633  avcodec_close(ost->enc_ctx);
3634  }
3635  goto dump_format;
3636  }
3637 
3638  /* open each encoder */
3639  for (i = 0; i < nb_output_streams; i++) {
3640  // skip streams fed from filtergraphs until we have a frame for them
3641  if (output_streams[i]->filter)
3642  continue;
3643 
3644  ret = init_output_stream(output_streams[i], error, sizeof(error));
3645  if (ret < 0)
3646  goto dump_format;
3647  }
3648 
3649  /* discard unused programs */
3650  for (i = 0; i < nb_input_files; i++) {
3651  InputFile *ifile = input_files[i];
3652  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3653  AVProgram *p = ifile->ctx->programs[j];
3654  int discard = AVDISCARD_ALL;
3655 
/* keep a program if at least one of its streams is in use */
3656  for (k = 0; k < p->nb_stream_indexes; k++)
3657  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3658  discard = AVDISCARD_DEFAULT;
3659  break;
3660  }
3661  p->discard = discard;
3662  }
3663  }
3664 
3665  /* write headers for files with no streams */
3666  for (i = 0; i < nb_output_files; i++) {
3667  oc = output_files[i]->ctx;
3668  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3669  ret = check_init_output_file(output_files[i], i);
3670  if (ret < 0)
3671  goto dump_format;
3672  }
3673  }
3674 
3675  dump_format:
3676  /* dump the stream mapping */
3677  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3678  for (i = 0; i < nb_input_streams; i++) {
3679  ist = input_streams[i];
3680 
/* complex filtergraph inputs: "Stream -> filter" lines */
3681  for (j = 0; j < ist->nb_filters; j++) {
3682  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3683  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3684  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3685  ist->filters[j]->name);
3686  if (nb_filtergraphs > 1)
3687  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3688  av_log(NULL, AV_LOG_INFO, "\n");
3689  }
3690  }
3691  }
3692 
3693  for (i = 0; i < nb_output_streams; i++) {
3694  ost = output_streams[i];
3695 
3696  if (ost->attachment_filename) {
3697  /* an attached file */
3698  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3699  ost->attachment_filename, ost->file_index, ost->index);
3700  continue;
3701  }
3702 
3703  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3704  /* output from a complex graph */
3705  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3706  if (nb_filtergraphs > 1)
3707  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3708 
3709  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3710  ost->index, ost->enc ? ost->enc->name : "?");
3711  continue;
3712  }
3713 
/* ordinary mapping: "Stream #a:b -> #c:d" plus codec details */
3714  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3715  input_streams[ost->source_index]->file_index,
3716  input_streams[ost->source_index]->st->index,
3717  ost->file_index,
3718  ost->index);
3719  if (ost->sync_ist != input_streams[ost->source_index])
3720  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3721  ost->sync_ist->file_index,
3722  ost->sync_ist->st->index);
3723  if (ost->stream_copy)
3724  av_log(NULL, AV_LOG_INFO, " (copy)");
3725  else {
3726  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3727  const AVCodec *out_codec = ost->enc;
3728  const char *decoder_name = "?";
3729  const char *in_codec_name = "?";
3730  const char *encoder_name = "?";
3731  const char *out_codec_name = "?";
3732  const AVCodecDescriptor *desc;
3733 
/* report "native" when the implementation name equals the codec name */
3734  if (in_codec) {
3735  decoder_name = in_codec->name;
3736  desc = avcodec_descriptor_get(in_codec->id);
3737  if (desc)
3738  in_codec_name = desc->name;
3739  if (!strcmp(decoder_name, in_codec_name))
3740  decoder_name = "native";
3741  }
3742 
3743  if (out_codec) {
3744  encoder_name = out_codec->name;
3745  desc = avcodec_descriptor_get(out_codec->id);
3746  if (desc)
3747  out_codec_name = desc->name;
3748  if (!strcmp(encoder_name, out_codec_name))
3749  encoder_name = "native";
3750  }
3751 
3752  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3753  in_codec_name, decoder_name,
3754  out_codec_name, encoder_name);
3755  }
3756  av_log(NULL, AV_LOG_INFO, "\n");
3757  }
3758 
3759  if (ret) {
3760  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3761  return ret;
3762  }
3763 
3765 
3766  return 0;
3767 }
3768 
3769 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3770 static int need_output(void)
3771 {
3772  int i;
3773 
3774  for (i = 0; i < nb_output_streams; i++) {
3775  OutputStream *ost = output_streams[i];
3776  OutputFile *of = output_files[ost->file_index];
3777  AVFormatContext *os = output_files[ost->file_index]->ctx;
3778 
3779  if (ost->finished ||
3780  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3781  continue;
3782  if (ost->frame_number >= ost->max_frames) {
3783  int j;
3784  for (j = 0; j < of->ctx->nb_streams; j++)
3785  close_output_stream(output_streams[of->ost_index + j]);
3786  continue;
3787  }
3788 
3789  return 1;
3790  }
3791 
3792  return 0;
3793 }
3794 
3795 /**
3796  * Select the output stream to process.
3797  *
3798  * @return selected output stream, or NULL if none available
3799  */
/* choose_output(): pick the output stream to process next — the one with the
 * smallest current DTS (rescaled to AV_TIME_BASE_Q), preferring streams that
 * still need initialization. Returns NULL when no stream is available.
 * NOTE(review): the signature line (original 3800,
 * 'static OutputStream *choose_output(void)' per the prototype comment above)
 * is missing from this scrape. */
3801 {
3802  int i;
3803  int64_t opts_min = INT64_MAX;
3804  OutputStream *ost_min = NULL;
3805 
3806  for (i = 0; i < nb_output_streams; i++) {
3807  OutputStream *ost = output_streams[i];
/* streams without a DTS yet sort first (INT64_MIN) */
3808  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3809  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3810  AV_TIME_BASE_Q);
3811  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3812  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3813 
/* uninitialized streams are serviced immediately so the muxer can start */
3814  if (!ost->initialized && !ost->inputs_done)
3815  return ost;
3816 
3817  if (!ost->finished && opts < opts_min) {
3818  opts_min = opts;
/* an unavailable (EAGAIN) minimum yields NULL so the caller waits */
3819  ost_min = ost->unavailable ? NULL : ost;
3820  }
3821  }
3822  return ost_min;
3823 }
3824 
/* Turn terminal echo on stdin on (non-zero) or off (zero).
 * No-op on platforms without termios; tcgetattr failures are ignored. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return; /* not a tty, or query failed: nothing to do */

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3836 
/* check_keyboard_interaction(): poll the console (at most every 100ms) for
 * interactive commands — quit, verbosity, QP histogram, packet dumping,
 * filter commands ('c'/'C'), debug modes ('d'/'D'), and help ('?').
 * Returns AVERROR_EXIT to request shutdown, 0 otherwise.
 * NOTE(review): the signature line (original 3837) and lines 3861 / 3898 are
 * missing from this scrape; surviving lines are kept verbatim. */
3838 {
3839  int i, ret, key;
3840  static int64_t last_time;
3841  if (received_nb_signals)
3842  return AVERROR_EXIT;
3843  /* read_key() returns 0 on EOF */
3844  if(cur_time - last_time >= 100000 && !run_as_daemon){
3845  key = read_key();
3846  last_time = cur_time;
3847  }else
3848  key = -1;
3849  if (key == 'q')
3850  return AVERROR_EXIT;
3851  if (key == '+') av_log_set_level(av_log_get_level()+10);
3852  if (key == '-') av_log_set_level(av_log_get_level()-10);
3853  if (key == 's') qp_hist ^= 1;
/* 'h' cycles: nothing -> packet dump -> packet+hex dump -> nothing */
3854  if (key == 'h'){
3855  if (do_hex_dump){
3856  do_hex_dump = do_pkt_dump = 0;
3857  } else if(do_pkt_dump){
3858  do_hex_dump = 1;
3859  } else
3860  do_pkt_dump = 1;
3862  }
/* 'c'/'C': read "<target> <time> <command> [<arg>]" and send or queue it to
 * the filtergraphs ('c' limits to the first matching filter) */
3863  if (key == 'c' || key == 'C'){
3864  char buf[4096], target[64], command[256], arg[256] = {0};
3865  double time;
3866  int k, n = 0;
3867  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3868  i = 0;
3869  set_tty_echo(1);
3870  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3871  if (k > 0)
3872  buf[i++] = k;
3873  buf[i] = 0;
3874  set_tty_echo(0);
3875  fprintf(stderr, "\n");
3876  if (k > 0 &&
3877  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3878  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3879  target, time, command, arg);
3880  for (i = 0; i < nb_filtergraphs; i++) {
3881  FilterGraph *fg = filtergraphs[i];
3882  if (fg->graph) {
3883  if (time < 0) {
3884  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3885  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3886  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3887  } else if (key == 'c') {
3888  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3889  ret = AVERROR_PATCHWELCOME;
3890  } else {
3891  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3892  if (ret < 0)
3893  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3894  }
3895  }
3896  }
3897  } else {
3899  "Parse error, at least 3 arguments were expected, "
3900  "only %d given in string '%s'\n", n, buf);
3901  }
3902  }
/* 'D' cycles through debug modes; 'd' prompts for a numeric debug value */
3903  if (key == 'd' || key == 'D'){
3904  int debug=0;
3905  if(key == 'D') {
3906  debug = input_streams[0]->st->codec->debug<<1;
3907  if(!debug) debug = 1;
3908  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3909  debug += debug;
3910  }else{
3911  char buf[32];
3912  int k = 0;
3913  i = 0;
3914  set_tty_echo(1);
3915  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3916  if (k > 0)
3917  buf[i++] = k;
3918  buf[i] = 0;
3919  set_tty_echo(0);
3920  fprintf(stderr, "\n");
3921  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3922  fprintf(stderr,"error parsing debug value\n");
3923  }
3924  for(i=0;i<nb_input_streams;i++) {
3925  input_streams[i]->st->codec->debug = debug;
3926  }
3927  for(i=0;i<nb_output_streams;i++) {
3928  OutputStream *ost = output_streams[i];
3929  ost->enc_ctx->debug = debug;
3930  }
3931  if(debug) av_log_set_level(AV_LOG_DEBUG);
3932  fprintf(stderr,"debug=%d\n", debug);
3933  }
3934  if (key == '?'){
3935  fprintf(stderr, "key function\n"
3936  "? show this help\n"
3937  "+ increase verbosity\n"
3938  "- decrease verbosity\n"
3939  "c Send command to first matching filter supporting it\n"
3940  "C Send/Queue command to all matching filters\n"
3941  "D cycle through available debug modes\n"
3942  "h dump packets/hex press to cycle through the 3 states\n"
3943  "q quit\n"
3944  "s Show QP histogram\n"
3945  );
3946  }
3947  return 0;
3948 }
3949 
3950 #if HAVE_PTHREADS
/* Per-input-file demuxer thread: read packets from the format context and
 * push them into the file's thread message queue until EOF/error. When the
 * queue fills in non-blocking mode it falls back to one blocking send.
 * NOTE(review): the av_log() head of the "queue blocking" warning (original
 * line 3973) is missing from this scrape. */
3951 static void *input_thread(void *arg)
3952 {
3953  InputFile *f = arg;
3954  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3955  int ret = 0;
3956 
3957  while (1) {
3958  AVPacket pkt;
3959  ret = av_read_frame(f->ctx, &pkt);
3960 
/* transient demuxer EAGAIN: back off and retry */
3961  if (ret == AVERROR(EAGAIN)) {
3962  av_usleep(10000);
3963  continue;
3964  }
/* EOF or fatal error: propagate to the receiving side and stop */
3965  if (ret < 0) {
3966  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3967  break;
3968  }
3969  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: switch to blocking once and warn */
3970  if (flags && ret == AVERROR(EAGAIN)) {
3971  flags = 0;
3972  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3974  "Thread message queue blocking; consider raising the "
3975  "thread_queue_size option (current value: %d)\n",
3976  f->thread_queue_size);
3977  }
3978  if (ret < 0) {
3979  if (ret != AVERROR_EOF)
3980  av_log(f->ctx, AV_LOG_ERROR,
3981  "Unable to send packet to main thread: %s\n",
3982  av_err2str(ret));
3983  av_packet_unref(&pkt);
3984  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3985  break;
3986  }
3987  }
3988 
3989  return NULL;
3990 }
3991 
/* Tear down all input demuxer threads: drain each file's message queue,
 * join its thread, and free the queue.
 * NOTE(review): original line 4002 (between 'continue;' and the drain loop)
 * is missing from this scrape. */
3992 static void free_input_threads(void)
3993 {
3994  int i;
3995 
3996  for (i = 0; i < nb_input_files; i++) {
3997  InputFile *f = input_files[i];
3998  AVPacket pkt;
3999 
/* file was never opened or never got a queue/thread */
4000  if (!f || !f->in_thread_queue)
4001  continue;
/* unref any packets still sitting in the queue so the thread can exit */
4003  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4004  av_packet_unref(&pkt);
4005 
4006  pthread_join(f->thread, NULL);
4007  f->joined = 1;
4008  av_thread_message_queue_free(&f->in_thread_queue);
4009  }
4010 }
4011 
4012 static int init_input_threads(void)
4013 {
4014  int i, ret;
4015 
4016  if (nb_input_files == 1)
4017  return 0;
4018 
4019  for (i = 0; i < nb_input_files; i++) {
4020  InputFile *f = input_files[i];
4021 
4022  if (f->ctx->pb ? !f->ctx->pb->seekable :
4023  strcmp(f->ctx->iformat->name, "lavfi"))
4024  f->non_blocking = 1;
4025  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4026  f->thread_queue_size, sizeof(AVPacket));
4027  if (ret < 0)
4028  return ret;
4029 
4030  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4031  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4032  av_thread_message_queue_free(&f->in_thread_queue);
4033  return AVERROR(ret);
4034  }
4035  }
4036  return 0;
4037 }
4038 
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file was flagged as such.
 * NOTE(review): the second half of the ternary flag argument (original line
 * 4043, the non-blocking/0 selection) is missing from this scrape. */
4039 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4040 {
4041  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4042  f->non_blocking ?
4044 }
4045 #endif
4046 
/* get_input_packet(): fetch the next packet for input file 'f' — honoring
 * -re rate emulation (returns EAGAIN while the stream is "ahead" of wall
 * clock), and going through the per-file thread queue when multiple inputs
 * are demuxed by threads.
 * NOTE(review): the signature line (original 4047) is missing from this
 * scrape; callers pass (InputFile *f, AVPacket *pkt). */
4048 {
4049  if (f->rate_emu) {
4050  int i;
4051  for (i = 0; i < f->nb_streams; i++) {
4052  InputStream *ist = input_streams[f->ist_index + i];
/* convert the stream DTS to microseconds and compare with elapsed time */
4053  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4054  int64_t now = av_gettime_relative() - ist->start;
4055  if (pts > now)
4056  return AVERROR(EAGAIN);
4057  }
4058  }
4059 
4060 #if HAVE_PTHREADS
4061  if (nb_input_files > 1)
4062  return get_input_packet_mt(f, pkt);
4063 #endif
4064  return av_read_frame(f->ctx, pkt);
4065 }
4066 
4067 static int got_eagain(void)
4068 {
4069  int i;
4070  for (i = 0; i < nb_output_streams; i++)
4071  if (output_streams[i]->unavailable)
4072  return 1;
4073  return 0;
4074 }
4075 
4076 static void reset_eagain(void)
4077 {
4078  int i;
4079  for (i = 0; i < nb_input_files; i++)
4080  input_files[i]->eagain = 0;
4081  for (i = 0; i < nb_output_streams; i++)
4082  output_streams[i]->unavailable = 0;
4083 }
4084 
4085 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4086 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4087  AVRational time_base)
4088 {
4089  int ret;
4090 
4091  if (!*duration) {
4092  *duration = tmp;
4093  return tmp_time_base;
4094  }
4095 
4096  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4097  if (ret < 0) {
4098  *duration = tmp;
4099  return tmp_time_base;
4100  }
4101 
4102  return time_base;
4103 }
4104 
/* seek_to_start(): rewind an input file for -stream_loop — seek back to the
 * start, flush decoders, estimate the duration of the pass just completed
 * (audio sample counts when available, else frame durations) and accumulate
 * it into ifile->duration so looped timestamps keep increasing; finally
 * decrement the remaining loop count.
 * NOTE(review): the signature line (original 4105) is missing from this
 * scrape; callers pass (InputFile *ifile, AVFormatContext *is). */
4106 {
4107  InputStream *ist;
4108  AVCodecContext *avctx;
4109  int i, ret, has_audio = 0;
4110  int64_t duration = 0;
4111 
4112  ret = av_seek_frame(is, -1, is->start_time, 0);
4113  if (ret < 0)
4114  return ret;
4115 
4116  for (i = 0; i < ifile->nb_streams; i++) {
4117  ist = input_streams[ifile->ist_index + i];
4118  avctx = ist->dec_ctx;
4119 
4120  // flush decoders
4121  if (ist->decoding_needed) {
4122  process_input_packet(ist, NULL, 1);
4123  avcodec_flush_buffers(avctx);
4124  }
4125 
4126  /* duration is the length of the last frame in a stream
4127  * when audio stream is present we don't care about
4128  * last video frame length because it's not defined exactly */
4129  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4130  has_audio = 1;
4131  }
4132 
4133  for (i = 0; i < ifile->nb_streams; i++) {
4134  ist = input_streams[ifile->ist_index + i];
4135  avctx = ist->dec_ctx;
4136 
/* prefer audio: last-frame duration derived from the decoded sample count */
4137  if (has_audio) {
4138  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4139  AVRational sample_rate = {1, avctx->sample_rate};
4140 
4141  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4142  } else
4143  continue;
4144  } else {
/* video fallback: one frame at the known (or average) frame rate */
4145  if (ist->framerate.num) {
4146  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4147  } else if (ist->st->avg_frame_rate.num) {
4148  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4149  } else duration = 1;
4150  }
4151  if (!ifile->duration)
4152  ifile->time_base = ist->st->time_base;
4153  /* the total duration of the stream, max_pts - min_pts is
4154  * the duration of the stream without the last frame */
4155  duration += ist->max_pts - ist->min_pts;
4156  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4157  ifile->time_base);
4158  }
4159 
/* loop > 0 means a finite loop count; consume one iteration */
4160  if (ifile->loop > 0)
4161  ifile->loop--;
4162 
4163  return ret;
4164 }
4165 
4166 /*
4167  * Return
4168  * - 0 -- one packet was read and processed
4169  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4170  * this function should be called again
4171  * - AVERROR_EOF -- this function should not be called again
4172  */
4173 static int process_input(int file_index)
4174 {
4175  InputFile *ifile = input_files[file_index];
4176  AVFormatContext *is;
4177  InputStream *ist;
4178  AVPacket pkt;
4179  int ret, i, j;
4180  int64_t duration;
4181  int64_t pkt_dts;
4182 
4183  is = ifile->ctx;
4184  ret = get_input_packet(ifile, &pkt);
4185 
4186  if (ret == AVERROR(EAGAIN)) {
4187  ifile->eagain = 1;
4188  return ret;
4189  }
4190  if (ret < 0 && ifile->loop) {
4191  if ((ret = seek_to_start(ifile, is)) < 0)
4192  return ret;
4193  ret = get_input_packet(ifile, &pkt);
4194  if (ret == AVERROR(EAGAIN)) {
4195  ifile->eagain = 1;
4196  return ret;
4197  }
4198  }
4199  if (ret < 0) {
4200  if (ret != AVERROR_EOF) {
4201  print_error(is->filename, ret);
4202  if (exit_on_error)
4203  exit_program(1);
4204  }
4205 
4206  for (i = 0; i < ifile->nb_streams; i++) {
4207  ist = input_streams[ifile->ist_index + i];
4208  if (ist->decoding_needed) {
4209  ret = process_input_packet(ist, NULL, 0);
4210  if (ret>0)
4211  return 0;
4212  }
4213 
4214  /* mark all outputs that don't go through lavfi as finished */
4215  for (j = 0; j < nb_output_streams; j++) {
4216  OutputStream *ost = output_streams[j];
4217 
4218  if (ost->source_index == ifile->ist_index + i &&
4219  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4220  finish_output_stream(ost);
4221  }
4222  }
4223 
4224  ifile->eof_reached = 1;
4225  return AVERROR(EAGAIN);
4226  }
4227 
4228  reset_eagain();
4229 
4230  if (do_pkt_dump) {
4232  is->streams[pkt.stream_index]);
4233  }
4234  /* the following test is needed in case new streams appear
4235  dynamically in stream : we ignore them */
4236  if (pkt.stream_index >= ifile->nb_streams) {
4237  report_new_stream(file_index, &pkt);
4238  goto discard_packet;
4239  }
4240 
4241  ist = input_streams[ifile->ist_index + pkt.stream_index];
4242 
4243  ist->data_size += pkt.size;
4244  ist->nb_packets++;
4245 
4246  if (ist->discard)
4247  goto discard_packet;
4248 
4249  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4250  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4251  exit_program(1);
4252  }
4253 
4254  if (debug_ts) {
4255  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4256  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4260  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4261  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4262  av_ts2str(input_files[ist->file_index]->ts_offset),
4263  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4264  }
4265 
4266  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4267  int64_t stime, stime2;
4268  // Correcting starttime based on the enabled streams
4269  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4270  // so we instead do it here as part of discontinuity handling
4271  if ( ist->next_dts == AV_NOPTS_VALUE
4272  && ifile->ts_offset == -is->start_time
4273  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4274  int64_t new_start_time = INT64_MAX;
4275  for (i=0; i<is->nb_streams; i++) {
4276  AVStream *st = is->streams[i];
4277  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4278  continue;
4279  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4280  }
4281  if (new_start_time > is->start_time) {
4282  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4283  ifile->ts_offset = -new_start_time;
4284  }
4285  }
4286 
4287  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4288  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4289  ist->wrap_correction_done = 1;
4290 
4291  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4292  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4293  ist->wrap_correction_done = 0;
4294  }
4295  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4296  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4297  ist->wrap_correction_done = 0;
4298  }
4299  }
4300 
4301  /* add the stream-global side data to the first packet */
4302  if (ist->nb_packets == 1) {
4303  for (i = 0; i < ist->st->nb_side_data; i++) {
4304  AVPacketSideData *src_sd = &ist->st->side_data[i];
4305  uint8_t *dst_data;
4306 
4307  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4308  continue;
4309 
4310  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4311  continue;
4312 
4313  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4314  if (!dst_data)
4315  exit_program(1);
4316 
4317  memcpy(dst_data, src_sd->data, src_sd->size);
4318  }
4319  }
4320 
4321  if (pkt.dts != AV_NOPTS_VALUE)
4322  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4323  if (pkt.pts != AV_NOPTS_VALUE)
4324  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4325 
4326  if (pkt.pts != AV_NOPTS_VALUE)
4327  pkt.pts *= ist->ts_scale;
4328  if (pkt.dts != AV_NOPTS_VALUE)
4329  pkt.dts *= ist->ts_scale;
4330 
4332  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4334  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4335  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4336  int64_t delta = pkt_dts - ifile->last_ts;
4337  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4338  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4339  ifile->ts_offset -= delta;
4341  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4342  delta, ifile->ts_offset);
4343  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4344  if (pkt.pts != AV_NOPTS_VALUE)
4345  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4346  }
4347  }
4348 
4349  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4350  if (pkt.pts != AV_NOPTS_VALUE) {
4351  pkt.pts += duration;
4352  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4353  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4354  }
4355 
4356  if (pkt.dts != AV_NOPTS_VALUE)
4357  pkt.dts += duration;
4358 
4360  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4362  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4363  !copy_ts) {
4364  int64_t delta = pkt_dts - ist->next_dts;
4365  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4366  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4367  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4368  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4369  ifile->ts_offset -= delta;
4371  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4372  delta, ifile->ts_offset);
4373  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4374  if (pkt.pts != AV_NOPTS_VALUE)
4375  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4376  }
4377  } else {
4378  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4379  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4380  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4381  pkt.dts = AV_NOPTS_VALUE;
4382  }
4383  if (pkt.pts != AV_NOPTS_VALUE){
4384  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4385  delta = pkt_pts - ist->next_dts;
4386  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4387  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4388  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4389  pkt.pts = AV_NOPTS_VALUE;
4390  }
4391  }
4392  }
4393  }
4394 
4395  if (pkt.dts != AV_NOPTS_VALUE)
4396  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4397 
4398  if (debug_ts) {
4399  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4401  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4402  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4403  av_ts2str(input_files[ist->file_index]->ts_offset),
4404  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4405  }
4406 
4407  sub2video_heartbeat(ist, pkt.pts);
4408 
4409  process_input_packet(ist, &pkt, 0);
4410 
4411 discard_packet:
4412  av_packet_unref(&pkt);
4413 
4414  return 0;
4415 }
4416 
4417 /**
4418  * Perform a step of transcoding for the specified filter graph.
4419  *
4420  * @param[in] graph filter graph to consider
4421  * @param[out] best_ist input stream where a frame would allow to continue
4422  * @return 0 for success, <0 for error
4423  */
4424 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4425 {
4426  int i, ret;
4427  int nb_requests, nb_requests_max = 0;
4428  InputFilter *ifilter;
4429  InputStream *ist;
4430 
4431  *best_ist = NULL;
4432  ret = avfilter_graph_request_oldest(graph->graph);
4433  if (ret >= 0)
4434  return reap_filters(0);
4435 
4436  if (ret == AVERROR_EOF) {
4437  ret = reap_filters(1);
4438  for (i = 0; i < graph->nb_outputs; i++)
4439  close_output_stream(graph->outputs[i]->ost);
4440  return ret;
4441  }
4442  if (ret != AVERROR(EAGAIN))
4443  return ret;
4444 
4445  for (i = 0; i < graph->nb_inputs; i++) {
4446  ifilter = graph->inputs[i];
4447  ist = ifilter->ist;
4448  if (input_files[ist->file_index]->eagain ||
4449  input_files[ist->file_index]->eof_reached)
4450  continue;
4451  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4452  if (nb_requests > nb_requests_max) {
4453  nb_requests_max = nb_requests;
4454  *best_ist = ist;
4455  }
4456  }
4457 
4458  if (!*best_ist)
4459  for (i = 0; i < graph->nb_outputs; i++)
4460  graph->outputs[i]->ost->unavailable = 1;
4461 
4462  return 0;
4463 }
4464 
4465 /**
4466  * Run a single step of transcoding.
4467  *
4468  * @return 0 for success, <0 for error
4469  */
4470 static int transcode_step(void)
4471 {
4472  OutputStream *ost;
4473  InputStream *ist = NULL;
4474  int ret;
4475 
4476  ost = choose_output();
4477  if (!ost) {
4478  if (got_eagain()) {
4479  reset_eagain();
4480  av_usleep(10000);
4481  return 0;
4482  }
4483  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4484  return AVERROR_EOF;
4485  }
4486 
4487  if (ost->filter && !ost->filter->graph->graph) {
4489  ret = configure_filtergraph(ost->filter->graph);
4490  if (ret < 0) {
4491  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4492  return ret;
4493  }
4494  }
4495  }
4496 
4497  if (ost->filter && ost->filter->graph->graph) {
4498  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4499  return ret;
4500  if (!ist)
4501  return 0;
4502  } else if (ost->filter) {
4503  int i;
4504  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4505  InputFilter *ifilter = ost->filter->graph->inputs[i];
4506  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4507  ist = ifilter->ist;
4508  break;
4509  }
4510  }
4511  if (!ist) {
4512  ost->inputs_done = 1;
4513  return 0;
4514  }
4515  } else {
4516  av_assert0(ost->source_index >= 0);
4517  ist = input_streams[ost->source_index];
4518  }
4519 
4520  ret = process_input(ist->file_index);
4521  if (ret == AVERROR(EAGAIN)) {
4522  if (input_files[ist->file_index]->eagain)
4523  ost->unavailable = 1;
4524  return 0;
4525  }
4526 
4527  if (ret < 0)
4528  return ret == AVERROR_EOF ? 0 : ret;
4529 
4530  return reap_filters(0);
4531 }
4532 
4533 /*
4534  * The following code is the main loop of the file converter
4535  */
4536 static int transcode(void)
4537 {
4538  int ret, i;
4539  AVFormatContext *os;
4540  OutputStream *ost;
4541  InputStream *ist;
4542  int64_t timer_start;
4543  int64_t total_packets_written = 0;
4544 
4545  ret = transcode_init();
4546  if (ret < 0)
4547  goto fail;
4548 
4549  if (stdin_interaction) {
4550  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4551  }
4552 
4553  timer_start = av_gettime_relative();
4554 
4555 #if HAVE_PTHREADS
4556  if ((ret = init_input_threads()) < 0)
4557  goto fail;
4558 #endif
4559 
4560  while (!received_sigterm) {
4561  int64_t cur_time= av_gettime_relative();
4562 
4563  /* if 'q' pressed, exits */
4564  if (stdin_interaction)
4565  if (check_keyboard_interaction(cur_time) < 0)
4566  break;
4567 
4568  /* check if there's any stream where output is still needed */
4569  if (!need_output()) {
4570  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4571  break;
4572  }
4573 
4574  ret = transcode_step();
4575  if (ret < 0 && ret != AVERROR_EOF) {
4576  char errbuf[128];
4577  av_strerror(ret, errbuf, sizeof(errbuf));
4578 
4579  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4580  break;
4581  }
4582 
4583  /* dump report by using the output first video and audio streams */
4584  print_report(0, timer_start, cur_time);
4585  }
4586 #if HAVE_PTHREADS
4587  free_input_threads();
4588 #endif
4589 
4590  /* at the end of stream, we must flush the decoder buffers */
4591  for (i = 0; i < nb_input_streams; i++) {
4592  ist = input_streams[i];
4593  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4594  process_input_packet(ist, NULL, 0);
4595  }
4596  }
4597  flush_encoders();
4598 
4599  term_exit();
4600 
4601  /* write the trailer if needed and close file */
4602  for (i = 0; i < nb_output_files; i++) {
4603  os = output_files[i]->ctx;
4604  if (!output_files[i]->header_written) {
4606  "Nothing was written into output file %d (%s), because "
4607  "at least one of its streams received no packets.\n",
4608  i, os->filename);
4609  continue;
4610  }
4611  if ((ret = av_write_trailer(os)) < 0) {
4612  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4613  if (exit_on_error)
4614  exit_program(1);
4615  }
4616  }
4617 
4618  /* dump report by using the first video and audio streams */
4619  print_report(1, timer_start, av_gettime_relative());
4620 
4621  /* close each encoder */
4622  for (i = 0; i < nb_output_streams; i++) {
4623  ost = output_streams[i];
4624  if (ost->encoding_needed) {
4625  av_freep(&ost->enc_ctx->stats_in);
4626  }
4627  total_packets_written += ost->packets_written;
4628  }
4629 
4630  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4631  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4632  exit_program(1);
4633  }
4634 
4635  /* close each decoder */
4636  for (i = 0; i < nb_input_streams; i++) {
4637  ist = input_streams[i];
4638  if (ist->decoding_needed) {
4639  avcodec_close(ist->dec_ctx);
4640  if (ist->hwaccel_uninit)
4641  ist->hwaccel_uninit(ist->dec_ctx);
4642  }
4643  }
4644 
4646 
4647  /* finished ! */
4648  ret = 0;
4649 
4650  fail:
4651 #if HAVE_PTHREADS
4652  free_input_threads();
4653 #endif
4654 
4655  if (output_streams) {
4656  for (i = 0; i < nb_output_streams; i++) {
4657  ost = output_streams[i];
4658  if (ost) {
4659  if (ost->logfile) {
4660  if (fclose(ost->logfile))
4662  "Error closing logfile, loss of information possible: %s\n",
4663  av_err2str(AVERROR(errno)));
4664  ost->logfile = NULL;
4665  }
4666  av_freep(&ost->forced_kf_pts);
4667  av_freep(&ost->apad);
4668  av_freep(&ost->disposition);
4669  av_dict_free(&ost->