FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* sub2video_get_blank_frame(InputStream *ist) -- the signature line (176) is
 * missing from this listing.
 * (Re)allocates ist->sub2video.frame at the decoder's frame size, falling
 * back to the configured sub2video w/h, and zeroes plane 0 so the canvas
 * starts blank. Returns 0 or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): listing line 184 is missing here (presumably the line
 * setting frame->format) -- confirm against the complete source. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* zero the full first plane: a blank (all-zero) canvas */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Stamp the current sub2video canvas with pts and push one reference of it
 * into every filtergraph input fed by this stream.
 * NOTE(review): listing lines 232-233 are missing (presumably the flags
 * argument of av_buffersrc_add_frame_flags() and the closing parenthesis)
 * -- confirm against the complete source. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
/* the canvas must have been allocated (sub2video_get_blank_frame) */
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* sub2video_update(InputStream *ist, AVSubtitle *sub) -- the signature line
 * (240) is missing from this listing.
 * Renders sub (or, when sub is NULL, an empty canvas) onto a fresh blank
 * frame and pushes it into the stream's filtergraphs via
 * sub2video_push_ref(); records when the picture expires in
 * ist->sub2video.end_pts.
 * NOTE(review): listing line 262 is missing (presumably the av_log() call
 * whose arguments continue on line 263). Also, `int8_t *dst` looks like a
 * dropped 'u' -- sub2video_copy_rect() takes uint8_t * -- confirm against
 * the complete source. */
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
/* subtitle display times are in ms; convert to the stream time base */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
/* no subtitle: emit a blank canvas starting where the last one ended */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Async-signal-safe part of terminal shutdown: restore the tty attributes
 * saved by term_init(), but only if we actually changed them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
327 
/* Public terminal-restore entry point.
 * NOTE(review): listing line 331 is missing (presumably the call to
 * term_exit_sigsafe()) -- confirm against the complete source. */
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
340 static void
/* NOTE(review): listing line 341 is missing (presumably the rest of the
 * signature, "sigterm_handler(int sig)"), as are lines 345-346 (presumably
 * incrementing received_nb_signals and restoring the terminal) -- confirm
 * against the complete source.
 * Records the received signal; after more than 3 signals, prints a message
 * with async-signal-safe write() and hard-exits with code 123. */
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
/* write() is async-signal-safe, unlike stdio */
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Install signal handlers and (when on a tty) switch the terminal to raw
 * mode so single keypresses can be read; the original attributes are saved
 * in oldtty and restored by term_exit_sigsafe().
 * NOTE(review): listing line 390 is missing -- presumably the opening
 * `if (...)` whose closing brace sits on line 408; as shown here the braces
 * in the HAVE_TERMIOS_H section are unbalanced. Confirm against the
 * complete source. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
/* raw-ish mode: no input translation, no echo, no canonical buffering */
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll stdin for one keypress without blocking.
 * Returns the character read, or -1 when no input is available (and, in the
 * termios path, propagates read()'s 0/-1 when the read does not yield
 * exactly one byte). Three platform variants: POSIX select()+read(),
 * Windows pipe via PeekNamedPipe(), Windows console via kbhit()/getch(). */
425 static int read_key(void)
426 {
427  unsigned char ch;
428 #if HAVE_TERMIOS_H
429  int n = 1;
430  struct timeval tv;
431  fd_set rfds;
432 
/* zero timeout => pure poll, never blocks */
433  FD_ZERO(&rfds);
434  FD_SET(0, &rfds);
435  tv.tv_sec = 0;
436  tv.tv_usec = 0;
437  n = select(1, &rfds, NULL, NULL, &tv);
438  if (n > 0) {
439  n = read(0, &ch, 1);
440  if (n == 1)
441  return ch;
442 
443  return n;
444  }
445 #elif HAVE_KBHIT
446 # if HAVE_PEEKNAMEDPIPE
447  static int is_pipe;
448  static HANDLE input_handle;
449  DWORD dw, nchars;
/* one-time probe: GetConsoleMode() fails when stdin is a pipe */
450  if(!input_handle){
451  input_handle = GetStdHandle(STD_INPUT_HANDLE);
452  is_pipe = !GetConsoleMode(input_handle, &dw);
453  }
454 
455  if (is_pipe) {
456  /* When running under a GUI, you will end here. */
457  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458  // input pipe may have been closed by the program that ran ffmpeg
459  return -1;
460  }
461  //Read it
462  if(nchars != 0) {
463  read(0, &ch, 1);
464  return ch;
465  }else{
466  return -1;
467  }
468  }
469 # endif
470  if(kbhit())
471  return(getch());
472 #endif
473  return -1;
474 }
475 
/* AVIOInterruptCB callback used to abort blocking I/O when the user has
 * signalled termination.
 * NOTE(review): listing line 478 (the function's return statement) is
 * missing -- as shown, this non-void function has no return. Confirm
 * against the complete source. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown: drain and free filtergraphs, close and free all output
 * files/streams and input files/streams, close the vstats file, free the
 * global arrays and log the final status. Called via register_exit().
 * NOTE(review): this listing has numerous gaps in its embedded numbering
 * (e.g. 494, 506, 510, 512, 530-531, 536, 542, 556-557, 562, 566, 570-573,
 * 580, 595-599, 604, 611, 615-616, 623-624) -- each gap is a line lost in
 * extraction; confirm every gap against the complete source before reuse. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
/* drain queued frames/subtitles, then free each filtergraph */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
498  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499  sizeof(frame), NULL);
500  av_frame_free(&frame);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
527  av_freep(&filtergraphs[i]);
528  }
529  av_freep(&filtergraphs);
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
545  av_freep(&output_files[i]);
546  }
547  for (i = 0; i < nb_output_streams; i++) {
548  OutputStream *ost = output_streams[i];
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570 
573 
/* packets still queued for a never-initialized muxer must be unreffed */
574  if (ost->muxing_queue) {
575  while (av_fifo_size(ost->muxing_queue)) {
576  AVPacket pkt;
577  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578  av_packet_unref(&pkt);
579  }
581  }
582 
583  av_freep(&output_streams[i]);
584  }
585 #if HAVE_THREADS
586  free_input_threads();
587 #endif
588  for (i = 0; i < nb_input_files; i++) {
589  avformat_close_input(&input_files[i]->ctx);
590  av_freep(&input_files[i]);
591  }
592  for (i = 0; i < nb_input_streams; i++) {
593  InputStream *ist = input_streams[i];
594 
597  av_dict_free(&ist->decoder_opts);
600  av_freep(&ist->filters);
601  av_freep(&ist->hwaccel_device);
602  av_freep(&ist->dts_buffer);
603 
605 
606  av_freep(&input_streams[i]);
607  }
608 
609  if (vstats_file) {
610  if (fclose(vstats_file))
612  "Error closing vstats file, loss of information possible: %s\n",
613  av_err2str(AVERROR(errno)));
614  }
616 
617  av_freep(&input_streams);
618  av_freep(&input_files);
619  av_freep(&output_streams);
620  av_freep(&output_files);
621 
622  uninit_opts();
623 
625 
626  if (received_sigterm) {
627  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628  (int) received_sigterm);
629  } else if (ret && atomic_load(&transcode_init_done)) {
630  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
631  }
632  term_exit();
633  ffmpeg_exited = 1;
634 }
635 
/* remove_avoptions -- the signature line (636) is missing from this listing
 * (presumably "static void remove_avoptions(AVDictionary **a, AVDictionary
 * *b)"): iterates all entries of b.
 * NOTE(review): listing line 641, the loop body (presumably removing each
 * matching key from *a), is missing -- confirm against the complete
 * source. */
637 {
638  AVDictionaryEntry *t = NULL;
639 
640  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642  }
643 }
644 
/* assert_avoptions -- the signature line (645) is missing from this listing
 * (presumably "static void assert_avoptions(AVDictionary *m)"): if any
 * option is left unconsumed in m, report it as unknown and abort.
 * NOTE(review): listing line 647 (presumably the declaration of
 * AVDictionaryEntry *t) is missing -- confirm against the complete
 * source. */
646 {
648  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
650  exit_program(1);
651  }
652 }
653 
654 static void abort_codec_experimental(AVCodec *c, int encoder)
655 {
656  exit_program(1);
657 }
658 
/* When -benchmark_all is active, log the user/sys/real time consumed since
 * the previous call, labelled by the printf-style fmt arguments, and reset
 * the reference point (current_time).
 * NOTE(review): listing line 662 is missing (presumably declaring/filling
 * the local BenchmarkTimeStamps t used below), as is line 670 (presumably
 * the av_log() call whose arguments continue on line 671) -- confirm
 * against the complete source. */
659 static void update_benchmark(const char *fmt, ...)
660 {
661  if (do_benchmark_all) {
663  va_list va;
664  char buf[1024];
665 
666  if (fmt) {
667  va_start(va, fmt);
668  vsnprintf(buf, sizeof(buf), fmt, va);
669  va_end(va);
671  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672  t.user_usec - current_time.user_usec,
673  t.sys_usec - current_time.sys_usec,
674  t.real_usec - current_time.real_usec, buf);
675  }
676  current_time = t;
677  }
678 }
679 
/* close_all_output_streams -- the signature line (680) is missing from this
 * listing (presumably "static void close_all_output_streams(OutputStream
 * *ost, OSTFinished this_stream, OSTFinished others)"): marks ost as
 * finished with this_stream and every other output stream with others. */
681 {
682  int i;
683  for (i = 0; i < nb_output_streams; i++) {
684  OutputStream *ost2 = output_streams[i];
685  ost2->finished |= ost == ost2 ? this_stream : others;
686  }
687 }
688 
/* Hand one packet to the muxer for ost: enforce -frames limits, buffer the
 * packet if the muxer header is not yet written, fetch video quality/error
 * side data, rescale timestamps to the stream time base, repair invalid or
 * non-monotonous DTS, then av_interleaved_write_frame().
 * NOTE(review): listing lines 735-736, 741, 775, 808 and 819 are missing
 * (e.g. 741 presumably declares the side-data pointer `sd` used below, and
 * 775 the first half of the audio/video condition). Confirm every gap
 * against the complete source. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
690 {
691  AVFormatContext *s = of->ctx;
692  AVStream *st = ost->st;
693  int ret;
694 
695  /*
696  * Audio encoders may split the packets -- #frames in != #packets out.
697  * But there is no reordering, so we can limit the number of output packets
698  * by simply dropping them here.
699  * Counting encoded video frames needs to be done separately because of
700  * reordering, see do_video_out().
701  * Do not count the packet when unqueued because it has been counted when queued.
702  */
703  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704  if (ost->frame_number >= ost->max_frames) {
705  av_packet_unref(pkt);
706  return;
707  }
708  ost->frame_number++;
709  }
710 
711  if (!of->header_written) {
712  AVPacket tmp_pkt = {0};
713  /* the muxer is not initialized yet, buffer the packet */
714  if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue geometrically, capped at max_muxing_queue_size */
715  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716  ost->max_muxing_queue_size);
717  if (new_size <= av_fifo_size(ost->muxing_queue)) {
719  "Too many packets buffered for output stream %d:%d.\n",
720  ost->file_index, ost->st->index);
721  exit_program(1);
722  }
723  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
724  if (ret < 0)
725  exit_program(1);
726  }
727  ret = av_packet_ref(&tmp_pkt, pkt);
728  if (ret < 0)
729  exit_program(1);
730  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
731  av_packet_unref(pkt);
732  return;
733  }
734 
737  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
738 
739  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
740  int i;
742  NULL);
/* side data layout: u32 quality, byte pict_type, byte error count, u64s */
743  ost->quality = sd ? AV_RL32(sd) : -1;
744  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
745 
746  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
747  if (sd && i < sd[5])
748  ost->error[i] = AV_RL64(sd + 8 + 8*i);
749  else
750  ost->error[i] = -1;
751  }
752 
753  if (ost->frame_rate.num && ost->is_cfr) {
754  if (pkt->duration > 0)
755  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
757  ost->mux_timebase);
758  }
759  }
760 
761  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
762 
763  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764  if (pkt->dts != AV_NOPTS_VALUE &&
765  pkt->pts != AV_NOPTS_VALUE &&
766  pkt->dts > pkt->pts) {
767  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
768  pkt->dts, pkt->pts,
769  ost->file_index, ost->st->index);
/* replace pts/dts with the median of {pts, dts, last_mux_dts + 1} */
770  pkt->pts =
771  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
774  }
776  pkt->dts != AV_NOPTS_VALUE &&
777  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778  ost->last_mux_dts != AV_NOPTS_VALUE) {
779  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780  if (pkt->dts < max) {
781  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782  av_log(s, loglevel, "Non-monotonous DTS in output stream "
783  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
785  if (exit_on_error) {
786  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
787  exit_program(1);
788  }
789  av_log(s, loglevel, "changing to %"PRId64". This may result "
790  "in incorrect timestamps in the output file.\n",
791  max);
792  if (pkt->pts >= pkt->dts)
793  pkt->pts = FFMAX(pkt->pts, max);
794  pkt->dts = max;
795  }
796  }
797  }
798  ost->last_mux_dts = pkt->dts;
799 
800  ost->data_size += pkt->size;
801  ost->packets_written++;
802 
803  pkt->stream_index = ost->index;
804 
805  if (debug_ts) {
806  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
811  pkt->size
812  );
813  }
814 
815  ret = av_interleaved_write_frame(s, pkt);
816  if (ret < 0) {
817  print_error("av_interleaved_write_frame()", ret);
818  main_return_code = 1;
820  }
821  av_packet_unref(pkt);
822 }
823 
/* close_output_stream -- the signature line (824) is missing from this
 * listing (presumably "static void close_output_stream(OutputStream
 * *ost)"): marks the encoder for ost as finished and, with -shortest,
 * clamps the output file's recording time to this stream's end. */
825 {
826  OutputFile *of = output_files[ost->file_index];
827 
828  ost->finished |= ENCODER_FINISHED;
829  if (of->shortest) {
830  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831  of->recording_time = FFMIN(of->recording_time, end);
832  }
833 }
834 
835 /*
836  * Send a single packet to the output, applying any bitstream filters
837  * associated with the output stream. This may result in any number
838  * of packets actually being written, depending on what bitstream
839  * filters are applied. The supplied packet is consumed and will be
840  * blank (as if newly-allocated) when this function returns.
841  *
842  * If eof is set, instead indicate EOF to all bitstream filters and
843  * therefore flush any delayed packets to the output. A blank packet
844  * must be supplied in this case.
845  */
/* NOTE(review): listing line 846 is missing -- presumably the first half of
 * the signature, "static void output_packet(OutputFile *of, AVPacket *pkt,"
 * -- confirm against the complete source. */
847  OutputStream *ost, int eof)
848 {
849  int ret = 0;
850 
851  /* apply the output bitstream filters, if any */
852  if (ost->nb_bitstream_filters) {
853  int idx;
854 
855  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
856  if (ret < 0)
857  goto finish;
858 
859  eof = 0;
860  idx = 1;
/* idx walks the bsf chain; 0 means everything has been drained */
861  while (idx) {
862  /* get a packet from the previous filter up the chain */
863  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864  if (ret == AVERROR(EAGAIN)) {
865  ret = 0;
866  idx--;
867  continue;
868  } else if (ret == AVERROR_EOF) {
869  eof = 1;
870  } else if (ret < 0)
871  goto finish;
872 
873  /* send it to the next filter down the chain or to the muxer */
874  if (idx < ost->nb_bitstream_filters) {
875  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
878  idx++;
879  eof = 0;
880  } else if (eof)
881  goto finish;
882  else
883  write_packet(of, pkt, ost, 0);
884  }
885  } else if (!eof)
886  write_packet(of, pkt, ost, 0);
887 
888 finish:
889  if (ret < 0 && ret != AVERROR_EOF) {
890  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
892  if(exit_on_error)
893  exit_program(1);
894  }
895 }
896 
/* check_recording_time -- the signature line (897) is missing from this
 * listing (presumably "static int check_recording_time(OutputStream
 * *ost)"): returns 1 while the stream is still inside the -t recording
 * window, otherwise closes the stream and returns 0.
 * NOTE(review): listing line 902 (the first half of the comparison,
 * presumably an av_compare_ts() call) is missing -- confirm against the
 * complete source. */
898 {
899  OutputFile *of = output_files[ost->file_index];
900 
901  if (of->recording_time != INT64_MAX &&
903  AV_TIME_BASE_Q) >= 0) {
904  close_output_stream(ost);
905  return 0;
906  }
907  return 1;
908 }
909 
911  AVFrame *frame)
912 {
913  AVCodecContext *enc = ost->enc_ctx;
914  AVPacket pkt;
915  int ret;
916 
917  av_init_packet(&pkt);
918  pkt.data = NULL;
919  pkt.size = 0;
920 
921  if (!check_recording_time(ost))
922  return;
923 
924  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925  frame->pts = ost->sync_opts;
926  ost->sync_opts = frame->pts + frame->nb_samples;
927  ost->samples_encoded += frame->nb_samples;
928  ost->frames_encoded++;
929 
930  av_assert0(pkt.size || !pkt.data);
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936  enc->time_base.num, enc->time_base.den);
937  }
938 
939  ret = avcodec_send_frame(enc, frame);
940  if (ret < 0)
941  goto error;
942 
943  while (1) {
944  ret = avcodec_receive_packet(enc, &pkt);
945  if (ret == AVERROR(EAGAIN))
946  break;
947  if (ret < 0)
948  goto error;
949 
950  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
951 
952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
953 
954  if (debug_ts) {
955  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
959  }
960 
961  output_packet(of, &pkt, ost, 0);
962  }
963 
964  return;
965 error:
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968 }
969 
970 static void do_subtitle_out(OutputFile *of,
971  OutputStream *ost,
972  AVSubtitle *sub)
973 {
974  int subtitle_out_max_size = 1024 * 1024;
975  int subtitle_out_size, nb, i;
976  AVCodecContext *enc;
977  AVPacket pkt;
978  int64_t pts;
979 
980  if (sub->pts == AV_NOPTS_VALUE) {
981  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
982  if (exit_on_error)
983  exit_program(1);
984  return;
985  }
986 
987  enc = ost->enc_ctx;
988 
989  if (!subtitle_out) {
990  subtitle_out = av_malloc(subtitle_out_max_size);
991  if (!subtitle_out) {
992  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
993  exit_program(1);
994  }
995  }
996 
997  /* Note: DVB subtitle need one packet to draw them and one other
998  packet to clear them */
999  /* XXX: signal it in the codec context ? */
1000  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1001  nb = 2;
1002  else
1003  nb = 1;
1004 
1005  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1006  pts = sub->pts;
1007  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008  pts -= output_files[ost->file_index]->start_time;
1009  for (i = 0; i < nb; i++) {
1010  unsigned save_num_rects = sub->num_rects;
1011 
1012  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013  if (!check_recording_time(ost))
1014  return;
1015 
1016  sub->pts = pts;
1017  // start_display_time is required to be 0
1018  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019  sub->end_display_time -= sub->start_display_time;
1020  sub->start_display_time = 0;
1021  if (i == 1)
1022  sub->num_rects = 0;
1023 
1024  ost->frames_encoded++;
1025 
1026  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027  subtitle_out_max_size, sub);
1028  if (i == 1)
1029  sub->num_rects = save_num_rects;
1030  if (subtitle_out_size < 0) {
1031  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1032  exit_program(1);
1033  }
1034 
1035  av_init_packet(&pkt);
1036  pkt.data = subtitle_out;
1037  pkt.size = subtitle_out_size;
1038  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041  /* XXX: the pts correction is handled here. Maybe handling
1042  it in the codec would be better */
1043  if (i == 0)
1044  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1045  else
1046  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1047  }
1048  pkt.dts = pkt.pts;
1049  output_packet(of, &pkt, ost, 0);
1050  }
1051 }
1052 
1053 static void do_video_out(OutputFile *of,
1054  OutputStream *ost,
1055  AVFrame *next_picture,
1056  double sync_ipts)
1057 {
1058  int ret, format_video_sync;
1059  AVPacket pkt;
1060  AVCodecContext *enc = ost->enc_ctx;
1061  AVCodecParameters *mux_par = ost->st->codecpar;
1062  AVRational frame_rate;
1063  int nb_frames, nb0_frames, i;
1064  double delta, delta0;
1065  double duration = 0;
1066  int frame_size = 0;
1067  InputStream *ist = NULL;
1069 
1070  if (ost->source_index >= 0)
1071  ist = input_streams[ost->source_index];
1072 
1073  frame_rate = av_buffersink_get_frame_rate(filter);
1074  if (frame_rate.num > 0 && frame_rate.den > 0)
1075  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1076 
1077  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1079 
1080  if (!ost->filters_script &&
1081  !ost->filters &&
1082  next_picture &&
1083  ist &&
1084  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1086  }
1087 
1088  if (!next_picture) {
1089  //end, flushing
1090  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091  ost->last_nb0_frames[1],
1092  ost->last_nb0_frames[2]);
1093  } else {
1094  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095  delta = delta0 + duration;
1096 
1097  /* by default, we output a single frame */
1098  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1099  nb_frames = 1;
1100 
1101  format_video_sync = video_sync_method;
1102  if (format_video_sync == VSYNC_AUTO) {
1103  if(!strcmp(of->ctx->oformat->name, "avi")) {
1104  format_video_sync = VSYNC_VFR;
1105  } else
1106  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1107  if ( ist
1108  && format_video_sync == VSYNC_CFR
1109  && input_files[ist->file_index]->ctx->nb_streams == 1
1110  && input_files[ist->file_index]->input_ts_offset == 0) {
1111  format_video_sync = VSYNC_VSCFR;
1112  }
1113  if (format_video_sync == VSYNC_CFR && copy_ts) {
1114  format_video_sync = VSYNC_VSCFR;
1115  }
1116  }
1117  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1118 
1119  if (delta0 < 0 &&
1120  delta > 0 &&
1121  format_video_sync != VSYNC_PASSTHROUGH &&
1122  format_video_sync != VSYNC_DROP) {
1123  if (delta0 < -0.6) {
1124  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1125  } else
1126  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127  sync_ipts = ost->sync_opts;
1128  duration += delta0;
1129  delta0 = 0;
1130  }
1131 
1132  switch (format_video_sync) {
1133  case VSYNC_VSCFR:
1134  if (ost->frame_number == 0 && delta0 >= 0.5) {
1135  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1136  delta = duration;
1137  delta0 = 0;
1138  ost->sync_opts = lrint(sync_ipts);
1139  }
1140  case VSYNC_CFR:
1141  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1143  nb_frames = 0;
1144  } else if (delta < -1.1)
1145  nb_frames = 0;
1146  else if (delta > 1.1) {
1147  nb_frames = lrintf(delta);
1148  if (delta0 > 1.1)
1149  nb0_frames = lrintf(delta0 - 0.6);
1150  }
1151  break;
1152  case VSYNC_VFR:
1153  if (delta <= -0.6)
1154  nb_frames = 0;
1155  else if (delta > 0.6)
1156  ost->sync_opts = lrint(sync_ipts);
1157  break;
1158  case VSYNC_DROP:
1159  case VSYNC_PASSTHROUGH:
1160  ost->sync_opts = lrint(sync_ipts);
1161  break;
1162  default:
1163  av_assert0(0);
1164  }
1165  }
1166 
1167  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168  nb0_frames = FFMIN(nb0_frames, nb_frames);
1169 
1170  memmove(ost->last_nb0_frames + 1,
1171  ost->last_nb0_frames,
1172  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173  ost->last_nb0_frames[0] = nb0_frames;
1174 
1175  if (nb0_frames == 0 && ost->last_dropped) {
1176  nb_frames_drop++;
1178  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179  ost->frame_number, ost->st->index, ost->last_frame->pts);
1180  }
1181  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1182  if (nb_frames > dts_error_threshold * 30) {
1183  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1184  nb_frames_drop++;
1185  return;
1186  }
1187  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189  if (nb_frames_dup > dup_warning) {
1190  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1191  dup_warning *= 10;
1192  }
1193  }
1194  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1195 
1196  /* duplicates frame if needed */
1197  for (i = 0; i < nb_frames; i++) {
1198  AVFrame *in_picture;
1199  av_init_packet(&pkt);
1200  pkt.data = NULL;
1201  pkt.size = 0;
1202 
1203  if (i < nb0_frames && ost->last_frame) {
1204  in_picture = ost->last_frame;
1205  } else
1206  in_picture = next_picture;
1207 
1208  if (!in_picture)
1209  return;
1210 
1211  in_picture->pts = ost->sync_opts;
1212 
1213 #if 1
1214  if (!check_recording_time(ost))
1215 #else
1216  if (ost->frame_number >= ost->max_frames)
1217 #endif
1218  return;
1219 
1220  {
1221  int forced_keyframe = 0;
1222  double pts_time;
1223 
1225  ost->top_field_first >= 0)
1226  in_picture->top_field_first = !!ost->top_field_first;
1227 
1228  if (in_picture->interlaced_frame) {
1229  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231  else
1232  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233  } else
1234  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 
1236  in_picture->quality = enc->global_quality;
1237  in_picture->pict_type = 0;
1238 
1239  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1240  in_picture->pts != AV_NOPTS_VALUE)
1241  ost->forced_kf_ref_pts = in_picture->pts;
1242 
1243  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1244  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1245  if (ost->forced_kf_index < ost->forced_kf_count &&
1246  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1247  ost->forced_kf_index++;
1248  forced_keyframe = 1;
1249  } else if (ost->forced_keyframes_pexpr) {
1250  double res;
1251  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1254  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1260  res);
1261  if (res) {
1262  forced_keyframe = 1;
1268  }
1269 
1271  } else if ( ost->forced_keyframes
1272  && !strncmp(ost->forced_keyframes, "source", 6)
1273  && in_picture->key_frame==1) {
1274  forced_keyframe = 1;
1275  }
1276 
1277  if (forced_keyframe) {
1278  in_picture->pict_type = AV_PICTURE_TYPE_I;
1279  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1280  }
1281 
1283  if (debug_ts) {
1284  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1285  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1286  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1287  enc->time_base.num, enc->time_base.den);
1288  }
1289 
1290  ost->frames_encoded++;
1291 
1292  ret = avcodec_send_frame(enc, in_picture);
1293  if (ret < 0)
1294  goto error;
1295 
1296  while (1) {
1297  ret = avcodec_receive_packet(enc, &pkt);
1298  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1299  if (ret == AVERROR(EAGAIN))
1300  break;
1301  if (ret < 0)
1302  goto error;
1303 
1304  if (debug_ts) {
1305  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1308  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1309  }
1310 
1311  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1312  pkt.pts = ost->sync_opts;
1313 
1314  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1315 
1316  if (debug_ts) {
1317  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1318  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1319  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1320  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1321  }
1322 
1323  frame_size = pkt.size;
1324  output_packet(of, &pkt, ost, 0);
1325 
1326  /* if two pass, output log */
1327  if (ost->logfile && enc->stats_out) {
1328  fprintf(ost->logfile, "%s", enc->stats_out);
1329  }
1330  }
1331  }
1332  ost->sync_opts++;
1333  /*
1334  * For video, number of frames in == number of packets out.
1335  * But there may be reordering, so we can't throw away frames on encoder
1336  * flush, we need to limit them here, before they go into encoder.
1337  */
1338  ost->frame_number++;
1339 
1340  if (vstats_filename && frame_size)
1341  do_video_stats(ost, frame_size);
1342  }
1343 
1344  if (!ost->last_frame)
1345  ost->last_frame = av_frame_alloc();
1346  av_frame_unref(ost->last_frame);
1347  if (next_picture && ost->last_frame)
1348  av_frame_ref(ost->last_frame, next_picture);
1349  else
1350  av_frame_free(&ost->last_frame);
1351 
1352  return;
1353 error:
1354  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1355  exit_program(1);
1356 }
1357 
1358 static double psnr(double d)
1359 {
1360  return -10.0 * log10(d);
1361 }
1362 
/* do_video_stats(): append one line of per-frame statistics for a video
 * output stream to the -vstats file (frame number, quantizer, optional PSNR,
 * frame size, accumulated time, instantaneous and average bitrate).
 * NOTE(review): the signature line (doxygen line 1363) is missing from this
 * rendered listing; presumably it is
 * static void do_video_stats(OutputStream *ost, int frame_size) — confirm
 * against the original ffmpeg.c. */
1364 {
1365  AVCodecContext *enc;
1366  int frame_number;
1367  double ti1, bitrate, avg_bitrate;
1368 
1369  /* this is executed just the first time do_video_stats is called */
1370  if (!vstats_file) {
1371  vstats_file = fopen(vstats_filename, "w");
1372  if (!vstats_file) {
1373  perror("fopen");
1374  exit_program(1);
1375  }
1376  }
1377 
1378  enc = ost->enc_ctx;
1379  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1380  frame_number = ost->st->nb_frames;
/* vstats_version 2 adds output-file and stream indices to each line. */
1381  if (vstats_version <= 1) {
1382  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1383  ost->quality / (float)FF_QP2LAMBDA);
1384  } else {
1385  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1386  ost->quality / (float)FF_QP2LAMBDA);
1387  }
1388 
/* PSNR is derived from the luma error normalized by pixel count * 255^2. */
1389  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1390  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1391 
1392  fprintf(vstats_file,"f_size= %6d ", frame_size);
1393  /* compute pts value */
1394  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Clamp the elapsed time to avoid division by ~0 in avg_bitrate below. */
1395  if (ti1 < 0.01)
1396  ti1 = 0.01;
1397 
1398  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1399  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1400  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1401  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1402  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1403  }
1404 }
1405 
1406 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1407 
/* finish_output_stream(): mark this output stream as done; when the output
 * file uses -shortest, mark every stream of that file as both encoder- and
 * muxer-finished so the whole file stops together.
 * NOTE(review): doxygen lines 1408 (the function signature, presumably
 * static void finish_output_stream(OutputStream *ost)) and 1413 are missing
 * from this rendered listing — confirm against the original ffmpeg.c. */
1409 {
1410  OutputFile *of = output_files[ost->file_index];
1411  int i;
1412 
1414 
1415  if (of->shortest) {
1416  for (i = 0; i < of->ctx->nb_streams; i++)
1417  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1418  }
1419 }
1420 
1421 /**
1422  * Get and encode new output from any of the filtergraphs, without causing
1423  * activity.
1424  *
1425  * @param flush if nonzero, on sink EOF push a NULL frame into do_video_out()
1426  *              to flush the video encoder
1425  * @return 0 for success, <0 for severe errors
1426  */
1427 static int reap_filters(int flush)
1428 {
1429  AVFrame *filtered_frame = NULL;
1430  int i;
1431 
1432  /* Reap all buffers present in the buffer sinks */
1433  for (i = 0; i < nb_output_streams; i++) {
1434  OutputStream *ost = output_streams[i];
1435  OutputFile *of = output_files[ost->file_index];
/* NOTE(review): doxygen line 1436 (presumably the AVFilterContext *filter
 * declaration) is missing from this rendered listing. */
1437  AVCodecContext *enc = ost->enc_ctx;
1438  int ret = 0;
1439 
/* Skip streams with no filter or an unconfigured filtergraph. */
1440  if (!ost->filter || !ost->filter->graph->graph)
1441  continue;
1442  filter = ost->filter->filter;
1443 
/* Lazily initialize the output stream on the first reaped frame. */
1444  if (!ost->initialized) {
1445  char error[1024] = "";
1446  ret = init_output_stream(ost, error, sizeof(error));
1447  if (ret < 0) {
1448  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1449  ost->file_index, ost->index, error);
1450  exit_program(1);
1451  }
1452  }
1453 
/* Allocate the reusable per-stream frame on first use. */
1454  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1455  return AVERROR(ENOMEM);
1456  }
1457  filtered_frame = ost->filtered_frame;
1458 
/* Drain the buffersink until it reports EAGAIN/EOF or an error. */
1459  while (1) {
1460  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1461  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
/* NOTE(review): doxygen line 1462 (the flags argument of
 * av_buffersink_get_frame_flags()) is missing from this listing. */
1463  if (ret < 0) {
1464  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
/* NOTE(review): doxygen line 1465 (the av_log() call opening) is missing. */
1466  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1467  } else if (flush && ret == AVERROR_EOF) {
/* NOTE(review): doxygen line 1468 (presumably a media-type check guarding
 * the video flush below) is missing from this listing. */
1469  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1470  }
1471  break;
1472  }
/* Discard frames for streams that already finished. */
1473  if (ost->finished) {
1474  av_frame_unref(filtered_frame);
1475  continue;
1476  }
1477  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1478  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1479  AVRational filter_tb = av_buffersink_get_time_base(filter);
1480  AVRational tb = enc->time_base;
/* Scale up the timebase denominator to retain sub-tick precision in
 * float_pts (up to 16 extra bits). */
1481  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1482 
1483  tb.den <<= extra_bits;
1484  float_pts =
1485  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1486  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1487  float_pts /= 1 << extra_bits;
1488  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1489  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1490 
1491  filtered_frame->pts =
1492  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1493  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1494  }
1495  //if (ost->source_index >= 0)
1496  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1497 
1498  switch (av_buffersink_get_type(filter)) {
1499  case AVMEDIA_TYPE_VIDEO:
1500  if (!ost->frame_aspect_ratio.num)
1501  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1502 
1503  if (debug_ts) {
1504  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1505  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1506  float_pts,
1507  enc->time_base.num, enc->time_base.den);
1508  }
1509 
1510  do_video_out(of, ost, filtered_frame, float_pts);
1511  break;
1512  case AVMEDIA_TYPE_AUDIO:
/* Encoders that cannot change parameters mid-stream must receive a
 * normalized (constant channel count) filter output. */
1513  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1514  enc->channels != filtered_frame->channels) {
/* NOTE(review): doxygen line 1515 (the av_log() call opening) is missing. */
1516  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1517  break;
1518  }
1519  do_audio_out(of, ost, filtered_frame);
1520  break;
1521  default:
1522  // TODO support subtitle filters
1523  av_assert0(0);
1524  }
1525 
1526  av_frame_unref(filtered_frame);
1527  }
1528  }
1529 
1530  return 0;
1531 }
1532 
/* print_final_stats(): emit the end-of-run summary — per-type byte totals,
 * muxing overhead percentage, and verbose per-stream demux/mux statistics.
 * @param total_size total muxed output size in bytes (from avio), or <=0
 *                   when unknown. */
1533 static void print_final_stats(int64_t total_size)
1534 {
1535  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1536  uint64_t subtitle_size = 0;
1537  uint64_t data_size = 0;
1538  float percent = -1.0;
1539  int i, j;
1540  int pass1_used = 1;
1541 
/* Accumulate per-codec-type payload sizes and global-header (extradata)
 * bytes over all output streams. */
1542  for (i = 0; i < nb_output_streams; i++) {
1543  OutputStream *ost = output_streams[i];
1544  switch (ost->enc_ctx->codec_type) {
1545  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1546  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1547  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1548  default: other_size += ost->data_size; break;
1549  }
1550  extra_size += ost->enc_ctx->extradata_size;
1551  data_size += ost->data_size;
/* NOTE(review): doxygen lines 1552-1553 (the condition guarding the
 * pass1_used reset, presumably a check on AV_CODEC_FLAG_PASS1) are missing
 * from this rendered listing — confirm against the original ffmpeg.c. */
1554  pass1_used = 0;
1555  }
1556 
/* Muxing overhead: container bytes beyond the raw stream payloads. */
1557  if (data_size && total_size>0 && total_size >= data_size)
1558  percent = 100.0 * (total_size - data_size) / data_size;
1559 
1560  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1561  video_size / 1024.0,
1562  audio_size / 1024.0,
1563  subtitle_size / 1024.0,
1564  other_size / 1024.0,
1565  extra_size / 1024.0);
1566  if (percent >= 0.0)
1567  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1568  else
1569  av_log(NULL, AV_LOG_INFO, "unknown");
1570  av_log(NULL, AV_LOG_INFO, "\n");
1571 
1572  /* print verbose per-stream stats */
1573  for (i = 0; i < nb_input_files; i++) {
1574  InputFile *f = input_files[i];
1575  uint64_t total_packets = 0, total_size = 0;
1576 
1577  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1578  i, f->ctx->url);
1579 
1580  for (j = 0; j < f->nb_streams; j++) {
1581  InputStream *ist = input_streams[f->ist_index + j];
1582  enum AVMediaType type = ist->dec_ctx->codec_type;
1583 
1584  total_size += ist->data_size;
1585  total_packets += ist->nb_packets;
1586 
1587  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1588  i, j, media_type_string(type));
1589  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1590  ist->nb_packets, ist->data_size);
1591 
1592  if (ist->decoding_needed) {
1593  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1594  ist->frames_decoded);
1595  if (type == AVMEDIA_TYPE_AUDIO)
1596  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1597  av_log(NULL, AV_LOG_VERBOSE, "; ");
1598  }
1599 
1600  av_log(NULL, AV_LOG_VERBOSE, "\n");
1601  }
1602 
1603  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1604  total_packets, total_size);
1605  }
1606 
1607  for (i = 0; i < nb_output_files; i++) {
1608  OutputFile *of = output_files[i];
1609  uint64_t total_packets = 0, total_size = 0;
1610 
1611  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1612  i, of->ctx->url);
1613 
1614  for (j = 0; j < of->ctx->nb_streams; j++) {
1615  OutputStream *ost = output_streams[of->ost_index + j];
1616  enum AVMediaType type = ost->enc_ctx->codec_type;
1617 
1618  total_size += ost->data_size;
1619  total_packets += ost->packets_written;
1620 
1621  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1622  i, j, media_type_string(type));
1623  if (ost->encoding_needed) {
1624  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1625  ost->frames_encoded);
1626  if (type == AVMEDIA_TYPE_AUDIO)
1627  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1628  av_log(NULL, AV_LOG_VERBOSE, "; ");
1629  }
1630 
1631  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1632  ost->packets_written, ost->data_size);
1633 
1634  av_log(NULL, AV_LOG_VERBOSE, "\n");
1635  }
1636 
1637  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1638  total_packets, total_size);
1639  }
/* Warn when nothing was encoded; hint at trimming options unless a pass-1
 * run (which legitimately writes no media) was detected above. */
1640  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1641  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1642  if (pass1_used) {
1643  av_log(NULL, AV_LOG_WARNING, "\n");
1644  } else {
1645  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1646  }
1647  }
1648 }
1649 
/* print_report(): build and emit the periodic progress line (frame count,
 * fps, quantizer, optional QP histogram and PSNR, size, time, bitrate,
 * dup/drop counts, speed) to stderr and, when -progress is active, a
 * machine-readable key=value block to progress_avio. Rate-limited to one
 * update per 500 ms unless this is the final report. */
1650 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1651 {
1652  AVBPrint buf, buf_script;
1653  OutputStream *ost;
1654  AVFormatContext *oc;
1655  int64_t total_size;
1656  AVCodecContext *enc;
1657  int frame_number, vid, i;
1658  double bitrate;
1659  double speed;
1660  int64_t pts = INT64_MIN + 1;
1661  static int64_t last_time = -1;
1662  static int qp_histogram[52];
1663  int hours, mins, secs, us;
1664  const char *hours_sign;
1665  int ret;
1666  float t;
1667 
1668  if (!print_stats && !is_last_report && !progress_avio)
1669  return;
1670 
/* Throttle intermediate reports to at most one every 500 ms. */
1671  if (!is_last_report) {
1672  if (last_time == -1) {
1673  last_time = cur_time;
1674  return;
1675  }
1676  if ((cur_time - last_time) < 500000)
1677  return;
1678  last_time = cur_time;
1679  }
1680 
1681  t = (cur_time-timer_start) / 1000000.0;
1682 
1683 
1684  oc = output_files[0]->ctx;
1685 
1686  total_size = avio_size(oc->pb);
1687  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1688  total_size = avio_tell(oc->pb);
1689 
1690  vid = 0;
/* NOTE(review): doxygen line 1691 (presumably av_bprint_init(&buf, ...)) is
 * missing from this rendered listing. */
1692  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1693  for (i = 0; i < nb_output_streams; i++) {
1694  float q = -1;
1695  ost = output_streams[i];
1696  enc = ost->enc_ctx;
1697  if (!ost->stream_copy)
1698  q = ost->quality / (float) FF_QP2LAMBDA;
1699 
/* Only the first video stream drives the frame/fps fields; later video
 * streams just append their quantizer. */
1700  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701  av_bprintf(&buf, "q=%2.1f ", q);
1702  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703  ost->file_index, ost->index, q);
1704  }
1705  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1706  float fps;
1707 
1708  frame_number = ost->frame_number;
1709  fps = t > 1 ? frame_number / t : 0;
1710  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1711  frame_number, fps < 9.95, fps, q);
1712  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1713  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1714  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1715  ost->file_index, ost->index, q);
1716  if (is_last_report)
1717  av_bprintf(&buf, "L");
1718  if (qp_hist) {
1719  int j;
1720  int qp = lrintf(q);
1721  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1722  qp_histogram[qp]++;
/* One hex digit per QP bucket: log2 of the hit count. */
1723  for (j = 0; j < 32; j++)
1724  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1725  }
1726 
1727  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1728  int j;
1729  double error, error_sum = 0;
1730  double scale, scale_sum = 0;
1731  double p;
1732  char type[3] = { 'Y','U','V' };
1733  av_bprintf(&buf, "PSNR=");
1734  for (j = 0; j < 3; j++) {
1735  if (is_last_report) {
1736  error = enc->error[j];
1737  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1738  } else {
1739  error = ost->error[j];
1740  scale = enc->width * enc->height * 255.0 * 255.0;
1741  }
/* Chroma planes are subsampled to a quarter of the luma area. */
1742  if (j)
1743  scale /= 4;
1744  error_sum += error;
1745  scale_sum += scale;
1746  p = psnr(error / scale);
1747  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1748  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1749  ost->file_index, ost->index, type[j] | 32, p);
1750  }
1751  p = psnr(error_sum / scale_sum);
1752  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1753  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1754  ost->file_index, ost->index, p);
1755  }
1756  vid = 1;
1757  }
1758  /* compute min output value */
/* NOTE(review): doxygen line 1759 (presumably a guard that the stream end
 * pts is valid) is missing from this rendered listing. */
1760  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1761  ost->st->time_base, AV_TIME_BASE_Q));
1762  if (is_last_report)
1763  nb_frames_drop += ost->last_dropped;
1764  }
1765 
1766  secs = FFABS(pts) / AV_TIME_BASE;
1767  us = FFABS(pts) % AV_TIME_BASE;
1768  mins = secs / 60;
1769  secs %= 60;
1770  hours = mins / 60;
1771  mins %= 60;
1772  hours_sign = (pts < 0) ? "-" : "";
1773 
1774  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1775  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1776 
1777  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1778  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1779  if (pts == AV_NOPTS_VALUE) {
1780  av_bprintf(&buf, "N/A ");
1781  } else {
1782  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1783  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1784  }
1785 
1786  if (bitrate < 0) {
1787  av_bprintf(&buf, "bitrate=N/A");
1788  av_bprintf(&buf_script, "bitrate=N/A\n");
1789  }else{
1790  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1791  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1792  }
1793 
1794  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1795  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1796  if (pts == AV_NOPTS_VALUE) {
1797  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1798  av_bprintf(&buf_script, "out_time=N/A\n");
1799  } else {
1800  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1801  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1802  hours_sign, hours, mins, secs, us);
1803  }
1804 
/* NOTE(review): doxygen line 1805 (presumably the condition guarding the
 * dup/drop output below) is missing from this rendered listing. */
1806  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1807  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1808  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1809 
1810  if (speed < 0) {
1811  av_bprintf(&buf, " speed=N/A");
1812  av_bprintf(&buf_script, "speed=N/A\n");
1813  } else {
1814  av_bprintf(&buf, " speed=%4.3gx", speed);
1815  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1816  }
1817 
/* Carriage return keeps intermediate reports on one line; the final report
 * ends with a newline. */
1818  if (print_stats || is_last_report) {
1819  const char end = is_last_report ? '\n' : '\r';
1820  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1821  fprintf(stderr, "%s %c", buf.str, end);
1822  } else
1823  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1824 
1825  fflush(stderr);
1826  }
1827  av_bprint_finalize(&buf, NULL);
1828 
1829  if (progress_avio) {
1830  av_bprintf(&buf_script, "progress=%s\n",
1831  is_last_report ? "end" : "continue");
1832  avio_write(progress_avio, buf_script.str,
1833  FFMIN(buf_script.len, buf_script.size - 1));
1834  avio_flush(progress_avio);
1835  av_bprint_finalize(&buf_script, NULL);
1836  if (is_last_report) {
1837  if ((ret = avio_closep(&progress_avio)) < 0)
/* NOTE(review): doxygen line 1838 (the av_log() call opening) is missing. */
1839  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1840  }
1841  }
1842 
1843  if (is_last_report)
1844  print_final_stats(total_size);
1845 }
1846 
/* ifilter_parameters_from_codecpar(): when no decoded frame was ever seen,
 * seed the input filter's media parameters from the stream's codec
 * parameters as reported by libavformat.
 * NOTE(review): the signature line (doxygen line 1847) is missing from this
 * rendered listing; presumably it is
 * static void ifilter_parameters_from_codecpar(InputFilter *ifilter,
 *                                              AVCodecParameters *par). */
1848 {
1849  // We never got any input. Set a fake format, which will
1850  // come from libavformat.
1851  ifilter->format = par->format;
1852  ifilter->sample_rate = par->sample_rate;
1853  ifilter->channels = par->channels;
1854  ifilter->channel_layout = par->channel_layout;
1855  ifilter->width = par->width;
1856  ifilter->height = par->height;
1857  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1858 }
1859 
/* flush_encoders(): at end of input, drain every encoding output stream —
 * configuring streams that never received data, then sending a NULL frame
 * to each encoder and muxing the remaining packets until EOF. */
1860 static void flush_encoders(void)
1861 {
1862  int i, ret;
1863 
1864  for (i = 0; i < nb_output_streams; i++) {
1865  OutputStream *ost = output_streams[i];
1866  AVCodecContext *enc = ost->enc_ctx;
1867  OutputFile *of = output_files[ost->file_index];
1868 
1869  if (!ost->encoding_needed)
1870  continue;
1871 
1872  // Try to enable encoding with no input frames.
1873  // Maybe we should just let encoding fail instead.
1874  if (!ost->initialized) {
1875  FilterGraph *fg = ost->filter->graph;
1876  char error[1024] = "";
1877 
/* NOTE(review): doxygen line 1878 (the av_log() call opening) is missing. */
1879  "Finishing stream %d:%d without any data written to it.\n",
1880  ost->file_index, ost->st->index);
1881 
1882  if (ost->filter && !fg->graph) {
1883  int x;
/* Fall back to codecpar-derived parameters for inputs that never saw
 * a decoded frame, so the graph can still be configured. */
1884  for (x = 0; x < fg->nb_inputs; x++) {
1885  InputFilter *ifilter = fg->inputs[x];
1886  if (ifilter->format < 0)
1887  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1888  }
1889 
/* NOTE(review): doxygen line 1890 (the condition guarding this `continue`,
 * presumably !ifilter_has_all_input_formats(fg)) is missing from this
 * rendered listing. */
1891  continue;
1892 
1893  ret = configure_filtergraph(fg);
1894  if (ret < 0) {
1895  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1896  exit_program(1);
1897  }
1898 
1899  finish_output_stream(ost);
1900  }
1901 
1902  ret = init_output_stream(ost, error, sizeof(error));
1903  if (ret < 0) {
1904  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1905  ost->file_index, ost->index, error);
1906  exit_program(1);
1907  }
1908  }
1909 
/* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush. */
1910  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1911  continue;
1912 
/* NOTE(review): doxygen line 1913 (a media-type condition guarding this
 * `continue`) is missing from this rendered listing. */
1914  continue;
1915 
1916  for (;;) {
1917  const char *desc = NULL;
1918  AVPacket pkt;
1919  int pkt_size;
1920 
1921  switch (enc->codec_type) {
1922  case AVMEDIA_TYPE_AUDIO:
1923  desc = "audio";
1924  break;
1925  case AVMEDIA_TYPE_VIDEO:
1926  desc = "video";
1927  break;
1928  default:
1929  av_assert0(0);
1930  }
1931 
1932  av_init_packet(&pkt);
1933  pkt.data = NULL;
1934  pkt.size = 0;
1935 
/* NOTE(review): doxygen line 1936 (presumably update_benchmark(NULL)) is
 * missing from this rendered listing. */
1937 
/* Drain loop: feed the flush (NULL) frame whenever the encoder asks for
 * more input, then keep receiving packets until EOF. */
1938  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1939  ret = avcodec_send_frame(enc, NULL);
1940  if (ret < 0) {
1941  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1942  desc,
1943  av_err2str(ret));
1944  exit_program(1);
1945  }
1946  }
1947 
1948  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1949  if (ret < 0 && ret != AVERROR_EOF) {
1950  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1951  desc,
1952  av_err2str(ret));
1953  exit_program(1);
1954  }
1955  if (ost->logfile && enc->stats_out) {
1956  fprintf(ost->logfile, "%s", enc->stats_out);
1957  }
/* EOF: emit the final (EOF-marking) packet to the muxer and stop. */
1958  if (ret == AVERROR_EOF) {
1959  output_packet(of, &pkt, ost, 1);
1960  break;
1961  }
1962  if (ost->finished & MUXER_FINISHED) {
1963  av_packet_unref(&pkt);
1964  continue;
1965  }
1966  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1967  pkt_size = pkt.size;
1968  output_packet(of, &pkt, ost, 0);
/* NOTE(review): doxygen line 1969 (the condition guarding do_video_stats(),
 * presumably checking for video + vstats_filename) is missing from this
 * rendered listing. */
1970  do_video_stats(ost, pkt_size);
1971  }
1972  }
1973  }
1974 }
1975 
1976 /*
1977  * Check whether a packet from ist should be written into ost at this time
1978  * (returns 1 to write, 0 to skip).
1978  */
/* NOTE(review): the signature line (doxygen line 1979) is missing from this
 * rendered listing; presumably it is
 * static int check_output_constraints(InputStream *ist, OutputStream *ost). */
1980 {
1981  OutputFile *of = output_files[ost->file_index];
1982  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1983 
/* The packet is only relevant to the stream that maps this input. */
1984  if (ost->source_index != ist_index)
1985  return 0;
1986 
1987  if (ost->finished)
1988  return 0;
1989 
/* Drop packets before the output file's -ss start time. */
1990  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1991  return 0;
1992 
1993  return 1;
1994 }
1995 
/* do_streamcopy(): pass a demuxed packet straight through to the muxer for
 * a -c copy stream — applying start-time/recording-time trimming and
 * rescaling timestamps into the output mux timebase. A NULL pkt flushes the
 * output bitstream filters.
 * NOTE(review): the signature line (doxygen line 1996) is missing from this
 * rendered listing; presumably it is
 * static void do_streamcopy(InputStream *ist, OutputStream *ost,
 *                           const AVPacket *pkt). */
1997 {
1998  OutputFile *of = output_files[ost->file_index];
1999  InputFile *f = input_files [ist->file_index];
2000  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2001  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2002  AVPacket opkt = { 0 };
2003 
2004  av_init_packet(&opkt);
2005 
2006  // EOF: flush output bitstream filters.
2007  if (!pkt) {
2008  output_packet(of, &opkt, ost, 1);
2009  return;
2010  }
2011 
/* Do not start a copied stream on a non-keyframe. */
2012  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
/* NOTE(review): doxygen line 2013 (the second half of this condition,
 * presumably !ost->copy_initial_nonkeyframes) is missing from this
 * rendered listing. */
2014  return;
2015 
2016  if (!ost->frame_number && !ost->copy_prior_start) {
2017  int64_t comp_start = start_time;
2018  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2019  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2020  if (pkt->pts == AV_NOPTS_VALUE ?
2021  ist->pts < comp_start :
2022  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2023  return;
2024  }
2025 
/* Stop the stream once the output file's -t recording time is reached. */
2026  if (of->recording_time != INT64_MAX &&
2027  ist->pts >= of->recording_time + start_time) {
2028  close_output_stream(ost);
2029  return;
2030  }
2031 
2032  if (f->recording_time != INT64_MAX) {
2033  start_time = f->ctx->start_time;
2034  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2035  start_time += f->start_time;
2036  if (ist->pts >= f->recording_time + start_time) {
2037  close_output_stream(ost);
2038  return;
2039  }
2040  }
2041 
2042  /* force the input stream PTS */
2043  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2044  ost->sync_opts++;
2045 
2046  if (pkt->pts != AV_NOPTS_VALUE)
2047  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2048  else
2049  opkt.pts = AV_NOPTS_VALUE;
2050 
2051  if (pkt->dts == AV_NOPTS_VALUE)
2052  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2053  else
2054  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2055  opkt.dts -= ost_tb_start_time;
2056 
2057  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
/* NOTE(review): doxygen line 2058 (presumably the duration computation via
 * av_get_audio_frame_duration()) is missing from this rendered listing. */
2059  if(!duration)
2060  duration = ist->dec_ctx->frame_size;
2061  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
/* NOTE(review): doxygen line 2062 (middle arguments of av_rescale_delta())
 * is missing from this rendered listing. */
2063  ost->mux_timebase) - ost_tb_start_time;
2064  }
2065 
2066  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2067 
2068  opkt.flags = pkt->flags;
2069 
/* Share the input packet's buffer by reference — no payload copy. */
2070  if (pkt->buf) {
2071  opkt.buf = av_buffer_ref(pkt->buf);
2072  if (!opkt.buf)
2073  exit_program(1);
2074  }
2075  opkt.data = pkt->data;
2076  opkt.size = pkt->size;
2077 
2078  av_copy_packet_side_data(&opkt, pkt);
2079 
2080  output_packet(of, &opkt, ost, 0);
2081 }
2082 
/* guess_input_channel_layout(): if the decoder reported no channel layout,
 * fill one in (and warn), honoring the -guess_layout_max limit. Returns 1
 * when a layout is present/guessed, 0 when it could not be determined.
 * NOTE(review): the signature line (doxygen line 2083) is missing from this
 * rendered listing; presumably it is
 * static int guess_input_channel_layout(InputStream *ist). */
2084 {
2085  AVCodecContext *dec = ist->dec_ctx;
2086 
2087  if (!dec->channel_layout) {
2088  char layout_name[256];
2089 
/* Refuse to guess beyond the user-configured channel-count limit. */
2090  if (dec->channels > ist->guess_layout_max)
2091  return 0;
/* NOTE(review): doxygen line 2092 (the call that actually derives the
 * layout, presumably av_get_default_channel_layout()) is missing from this
 * rendered listing. */
2093  if (!dec->channel_layout)
2094  return 0;
2095  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2096  dec->channels, dec->channel_layout);
2097  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2098  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2099  }
2100  return 1;
2101 }
2102 
/* check_decode_result(): update the global decode error statistics after a
 * decode attempt and, with -xerror, abort on decode errors or on corrupt
 * decoded frames. */
2103 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2104 {
/* Count successes vs failures: index 0 = got output, 1 = decode error. */
2105  if (*got_output || ret<0)
2106  decode_error_stat[ret<0] ++;
2107 
2108  if (ret < 0 && exit_on_error)
2109  exit_program(1);
2110 
2111  if (exit_on_error && *got_output && ist) {
/* NOTE(review): doxygen line 2112 (an inner condition, presumably checking
 * the decoded frame's corruption/error flags) is missing from this
 * rendered listing — the extra closing brace below belongs to it. */
2113  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2114  exit_program(1);
2115  }
2116  }
2117 }
2118 
2119 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the graph has a known format,
 * 0 otherwise.
 * NOTE(review): the signature line (doxygen line 2120) is missing from this
 * rendered listing; presumably it is
 * static int ifilter_has_all_input_formats(FilterGraph *fg). */
2121 {
2122  int i;
2123  for (i = 0; i < fg->nb_inputs; i++) {
2124  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2126  return 0;
2127  }
2128  return 1;
2129 }
2130 
/* ifilter_send_frame(): push a decoded frame into an input filter. If the
 * frame's parameters differ from what the graph was configured for (or the
 * graph is not configured yet), reinitialize the graph first; frames whose
 * sibling inputs are still format-less are queued instead.
 * NOTE(review): the signature line (doxygen line 2131) is missing from this
 * rendered listing; presumably it is
 * static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame). */
2132 {
2133  FilterGraph *fg = ifilter->graph;
2134  int need_reinit, ret, i;
2135 
2136  /* determine if the parameters for this input changed */
2137  need_reinit = ifilter->format != frame->format;
/* A change in hw frames context (appearing, disappearing, or replaced)
 * also forces a reinit. */
2138  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2139  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2140  need_reinit = 1;
2141 
2142  switch (ifilter->ist->st->codecpar->codec_type) {
2143  case AVMEDIA_TYPE_AUDIO:
2144  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2145  ifilter->channels != frame->channels ||
2146  ifilter->channel_layout != frame->channel_layout;
2147  break;
2148  case AVMEDIA_TYPE_VIDEO:
2149  need_reinit |= ifilter->width != frame->width ||
2150  ifilter->height != frame->height;
2151  break;
2152  }
2153 
2154  if (need_reinit) {
2155  ret = ifilter_parameters_from_frame(ifilter, frame);
2156  if (ret < 0)
2157  return ret;
2158  }
2159 
2160  /* (re)init the graph if possible, otherwise buffer the frame and return */
2161  if (need_reinit || !fg->graph) {
2162  for (i = 0; i < fg->nb_inputs; i++) {
/* Another input's format is still unknown: queue a clone of this frame
 * in the FIFO and retry configuration later. */
2163  if (!ifilter_has_all_input_formats(fg)) {
2164  AVFrame *tmp = av_frame_clone(frame);
2165  if (!tmp)
2166  return AVERROR(ENOMEM);
2167  av_frame_unref(frame);
2168 
2169  if (!av_fifo_space(ifilter->frame_queue)) {
2170  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2171  if (ret < 0) {
2172  av_frame_free(&tmp);
2173  return ret;
2174  }
2175  }
2176  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2177  return 0;
2178  }
2179  }
2180 
/* Flush the old graph's pending output before tearing it down. */
2181  ret = reap_filters(1);
2182  if (ret < 0 && ret != AVERROR_EOF) {
2183  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2184  return ret;
2185  }
2186 
2187  ret = configure_filtergraph(fg);
2188  if (ret < 0) {
2189  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2190  return ret;
2191  }
2192  }
2193 
/* NOTE(review): doxygen line 2194 (the call actually feeding the frame,
 * presumably av_buffersrc_add_frame_flags()) is missing from this rendered
 * listing. */
2195  if (ret < 0) {
2196  if (ret != AVERROR_EOF)
2197  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2198  return ret;
2199  }
2200 
2201  return 0;
2202 }
2203 
2204 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2205 {
2206  int ret;
2207 
2208  ifilter->eof = 1;
2209 
2210  if (ifilter->filter) {
2211  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2212  if (ret < 0)
2213  return ret;
2214  } else {
2215  // the filtergraph was never configured
2216  if (ifilter->format < 0)
2217  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2218  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2219  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2220  return AVERROR_INVALIDDATA;
2221  }
2222  }
2223 
2224  return 0;
2225 }
2226 
2227 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2228 // There is the following difference: if you got a frame, you must call
2229 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2230 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2231 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2232 {
2233  int ret;
2234 
2235  *got_frame = 0;
2236 
2237  if (pkt) {
2238  ret = avcodec_send_packet(avctx, pkt);
2239  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2240  // decoded frames with avcodec_receive_frame() until done.
2241  if (ret < 0 && ret != AVERROR_EOF)
2242  return ret;
2243  }
2244 
2245  ret = avcodec_receive_frame(avctx, frame);
2246  if (ret < 0 && ret != AVERROR(EAGAIN))
2247  return ret;
2248  if (ret >= 0)
2249  *got_frame = 1;
2250 
2251  return 0;
2252 }
2253 
2254 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2255 {
2256  int i, ret;
2257  AVFrame *f;
2258 
2259  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2260  for (i = 0; i < ist->nb_filters; i++) {
2261  if (i < ist->nb_filters - 1) {
2262  f = ist->filter_frame;
2263  ret = av_frame_ref(f, decoded_frame);
2264  if (ret < 0)
2265  break;
2266  } else
2267  f = decoded_frame;
2268  ret = ifilter_send_frame(ist->filters[i], f);
2269  if (ret == AVERROR_EOF)
2270  ret = 0; /* ignore */
2271  if (ret < 0) {
2273  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2274  break;
2275  }
2276  }
2277  return ret;
2278 }
2279 
2280 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2281  int *decode_failed)
2282 {
2283  AVFrame *decoded_frame;
2284  AVCodecContext *avctx = ist->dec_ctx;
2285  int ret, err = 0;
2286  AVRational decoded_frame_tb;
2287 
2288  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2289  return AVERROR(ENOMEM);
2290  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2291  return AVERROR(ENOMEM);
2292  decoded_frame = ist->decoded_frame;
2293 
2295  ret = decode(avctx, decoded_frame, got_output, pkt);
2296  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2297  if (ret < 0)
2298  *decode_failed = 1;
2299 
2300  if (ret >= 0 && avctx->sample_rate <= 0) {
2301  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2302  ret = AVERROR_INVALIDDATA;
2303  }
2304 
2305  if (ret != AVERROR_EOF)
2306  check_decode_result(ist, got_output, ret);
2307 
2308  if (!*got_output || ret < 0)
2309  return ret;
2310 
2311  ist->samples_decoded += decoded_frame->nb_samples;
2312  ist->frames_decoded++;
2313 
2314 #if 1
2315  /* increment next_dts to use for the case where the input stream does not
2316  have timestamps or there are multiple frames in the packet */
2317  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2318  avctx->sample_rate;
2319  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320  avctx->sample_rate;
2321 #endif
2322 
2323  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324  decoded_frame_tb = ist->st->time_base;
2325  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326  decoded_frame->pts = pkt->pts;
2327  decoded_frame_tb = ist->st->time_base;
2328  }else {
2329  decoded_frame->pts = ist->dts;
2330  decoded_frame_tb = AV_TIME_BASE_Q;
2331  }
2332  if (decoded_frame->pts != AV_NOPTS_VALUE)
2333  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335  (AVRational){1, avctx->sample_rate});
2336  ist->nb_samples = decoded_frame->nb_samples;
2337  err = send_frame_to_filters(ist, decoded_frame);
2338 
2339  av_frame_unref(ist->filter_frame);
2340  av_frame_unref(decoded_frame);
2341  return err < 0 ? err : ret;
2342 }
2343 
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2345  int *decode_failed)
2346 {
2347  AVFrame *decoded_frame;
2348  int i, ret = 0, err = 0;
2349  int64_t best_effort_timestamp;
2350  int64_t dts = AV_NOPTS_VALUE;
2351  AVPacket avpkt;
2352 
2353  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2355  // skip the packet.
2356  if (!eof && pkt && pkt->size == 0)
2357  return 0;
2358 
2359  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360  return AVERROR(ENOMEM);
2361  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362  return AVERROR(ENOMEM);
2363  decoded_frame = ist->decoded_frame;
2364  if (ist->dts != AV_NOPTS_VALUE)
2365  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2366  if (pkt) {
2367  avpkt = *pkt;
2368  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2369  }
2370 
2371  // The old code used to set dts on the drain packet, which does not work
2372  // with the new API anymore.
2373  if (eof) {
2374  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2375  if (!new)
2376  return AVERROR(ENOMEM);
2377  ist->dts_buffer = new;
2378  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2379  }
2380 
2382  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2384  if (ret < 0)
2385  *decode_failed = 1;
2386 
2387  // The following line may be required in some cases where there is no parser
2388  // or the parser does not has_b_frames correctly
2389  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2392  } else
2394  "video_delay is larger in decoder than demuxer %d > %d.\n"
2395  "If you want to help, upload a sample "
2396  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398  ist->dec_ctx->has_b_frames,
2399  ist->st->codecpar->video_delay);
2400  }
2401 
2402  if (ret != AVERROR_EOF)
2403  check_decode_result(ist, got_output, ret);
2404 
2405  if (*got_output && ret >= 0) {
2406  if (ist->dec_ctx->width != decoded_frame->width ||
2407  ist->dec_ctx->height != decoded_frame->height ||
2408  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410  decoded_frame->width,
2411  decoded_frame->height,
2412  decoded_frame->format,
2413  ist->dec_ctx->width,
2414  ist->dec_ctx->height,
2415  ist->dec_ctx->pix_fmt);
2416  }
2417  }
2418 
2419  if (!*got_output || ret < 0)
2420  return ret;
2421 
2422  if(ist->top_field_first>=0)
2423  decoded_frame->top_field_first = ist->top_field_first;
2424 
2425  ist->frames_decoded++;
2426 
2427  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2429  if (err < 0)
2430  goto fail;
2431  }
2432  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2433 
2434  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435  *duration_pts = decoded_frame->pkt_duration;
2436 
2437  if (ist->framerate.num)
2438  best_effort_timestamp = ist->cfr_next_pts++;
2439 
2440  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441  best_effort_timestamp = ist->dts_buffer[0];
2442 
2443  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445  ist->nb_dts_buffer--;
2446  }
2447 
2448  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2450 
2451  if (ts != AV_NOPTS_VALUE)
2452  ist->next_pts = ist->pts = ts;
2453  }
2454 
2455  if (debug_ts) {
2456  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458  ist->st->index, av_ts2str(decoded_frame->pts),
2459  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460  best_effort_timestamp,
2461  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462  decoded_frame->key_frame, decoded_frame->pict_type,
2463  ist->st->time_base.num, ist->st->time_base.den);
2464  }
2465 
2466  if (ist->st->sample_aspect_ratio.num)
2467  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2468 
2469  err = send_frame_to_filters(ist, decoded_frame);
2470 
2471 fail:
2473  av_frame_unref(decoded_frame);
2474  return err < 0 ? err : ret;
2475 }
2476 
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2478  int *decode_failed)
2479 {
2480  AVSubtitle subtitle;
2481  int free_sub = 1;
2482  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483  &subtitle, got_output, pkt);
2484 
2485  check_decode_result(NULL, got_output, ret);
2486 
2487  if (ret < 0 || !*got_output) {
2488  *decode_failed = 1;
2489  if (!pkt->size)
2490  sub2video_flush(ist);
2491  return ret;
2492  }
2493 
2494  if (ist->fix_sub_duration) {
2495  int end = 1;
2496  if (ist->prev_sub.got_output) {
2497  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498  1000, AV_TIME_BASE);
2499  if (end < ist->prev_sub.subtitle.end_display_time) {
2500  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2503  end <= 0 ? ", dropping it" : "");
2505  }
2506  }
2507  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508  FFSWAP(int, ret, ist->prev_sub.ret);
2509  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2510  if (end <= 0)
2511  goto out;
2512  }
2513 
2514  if (!*got_output)
2515  return ret;
2516 
2517  if (ist->sub2video.frame) {
2518  sub2video_update(ist, &subtitle);
2519  } else if (ist->nb_filters) {
2520  if (!ist->sub2video.sub_queue)
2521  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522  if (!ist->sub2video.sub_queue)
2523  exit_program(1);
2524  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2526  if (ret < 0)
2527  exit_program(1);
2528  }
2529  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2530  free_sub = 0;
2531  }
2532 
2533  if (!subtitle.num_rects)
2534  goto out;
2535 
2536  ist->frames_decoded++;
2537 
2538  for (i = 0; i < nb_output_streams; i++) {
2539  OutputStream *ost = output_streams[i];
2540 
2541  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2543  continue;
2544 
2545  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2546  }
2547 
2548 out:
2549  if (free_sub)
2550  avsubtitle_free(&subtitle);
2551  return ret;
2552 }
2553 
2555 {
2556  int i, ret;
2557  /* TODO keep pts also in stream time base to avoid converting back */
2558  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2560 
2561  for (i = 0; i < ist->nb_filters; i++) {
2562  ret = ifilter_send_eof(ist->filters[i], pts);
2563  if (ret < 0)
2564  return ret;
2565  }
2566  return 0;
2567 }
2568 
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2571 {
2572  int ret = 0, i;
2573  int repeating = 0;
2574  int eof_reached = 0;
2575 
2576  AVPacket avpkt;
2577  if (!ist->saw_first_ts) {
2578  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579  ist->pts = 0;
2580  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583  }
2584  ist->saw_first_ts = 1;
2585  }
2586 
2587  if (ist->next_dts == AV_NOPTS_VALUE)
2588  ist->next_dts = ist->dts;
2589  if (ist->next_pts == AV_NOPTS_VALUE)
2590  ist->next_pts = ist->pts;
2591 
2592  if (!pkt) {
2593  /* EOF handling */
2594  av_init_packet(&avpkt);
2595  avpkt.data = NULL;
2596  avpkt.size = 0;
2597  } else {
2598  avpkt = *pkt;
2599  }
2600 
2601  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604  ist->next_pts = ist->pts = ist->dts;
2605  }
2606 
2607  // while we have more to decode or while the decoder did output something on EOF
2608  while (ist->decoding_needed) {
2609  int64_t duration_dts = 0;
2610  int64_t duration_pts = 0;
2611  int got_output = 0;
2612  int decode_failed = 0;
2613 
2614  ist->pts = ist->next_pts;
2615  ist->dts = ist->next_dts;
2616 
2617  switch (ist->dec_ctx->codec_type) {
2618  case AVMEDIA_TYPE_AUDIO:
2619  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2620  &decode_failed);
2621  break;
2622  case AVMEDIA_TYPE_VIDEO:
2623  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2624  &decode_failed);
2625  if (!repeating || !pkt || got_output) {
2626  if (pkt && pkt->duration) {
2627  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2630  duration_dts = ((int64_t)AV_TIME_BASE *
2631  ist->dec_ctx->framerate.den * ticks) /
2633  }
2634 
2635  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636  ist->next_dts += duration_dts;
2637  }else
2638  ist->next_dts = AV_NOPTS_VALUE;
2639  }
2640 
2641  if (got_output) {
2642  if (duration_pts > 0) {
2643  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2644  } else {
2645  ist->next_pts += duration_dts;
2646  }
2647  }
2648  break;
2649  case AVMEDIA_TYPE_SUBTITLE:
2650  if (repeating)
2651  break;
2652  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653  if (!pkt && ret >= 0)
2654  ret = AVERROR_EOF;
2655  break;
2656  default:
2657  return -1;
2658  }
2659 
2660  if (ret == AVERROR_EOF) {
2661  eof_reached = 1;
2662  break;
2663  }
2664 
2665  if (ret < 0) {
2666  if (decode_failed) {
2667  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668  ist->file_index, ist->st->index, av_err2str(ret));
2669  } else {
2670  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672  }
2673  if (!decode_failed || exit_on_error)
2674  exit_program(1);
2675  break;
2676  }
2677 
2678  if (got_output)
2679  ist->got_output = 1;
2680 
2681  if (!got_output)
2682  break;
2683 
2684  // During draining, we might get multiple output frames in this loop.
2685  // ffmpeg.c does not drain the filter chain on configuration changes,
2686  // which means if we send multiple frames at once to the filters, and
2687  // one of those frames changes configuration, the buffered frames will
2688  // be lost. This can upset certain FATE tests.
2689  // Decode only 1 frame per call on EOF to appease these FATE tests.
2690  // The ideal solution would be to rewrite decoding to use the new
2691  // decoding API in a better way.
2692  if (!pkt)
2693  break;
2694 
2695  repeating = 1;
2696  }
2697 
2698  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699  /* except when looping we need to flush but not to send an EOF */
2700  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701  int ret = send_filter_eof(ist);
2702  if (ret < 0) {
2703  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2704  exit_program(1);
2705  }
2706  }
2707 
2708  /* handle stream copy */
2709  if (!ist->decoding_needed && pkt) {
2710  ist->dts = ist->next_dts;
2711  switch (ist->dec_ctx->codec_type) {
2712  case AVMEDIA_TYPE_AUDIO:
2713  av_assert1(pkt->duration >= 0);
2714  if (ist->dec_ctx->sample_rate) {
2715  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716  ist->dec_ctx->sample_rate;
2717  } else {
2718  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2719  }
2720  break;
2721  case AVMEDIA_TYPE_VIDEO:
2722  if (ist->framerate.num) {
2723  // TODO: Remove work-around for c99-to-c89 issue 7
2724  AVRational time_base_q = AV_TIME_BASE_Q;
2725  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727  } else if (pkt->duration) {
2728  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729  } else if(ist->dec_ctx->framerate.num != 0) {
2730  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731  ist->next_dts += ((int64_t)AV_TIME_BASE *
2732  ist->dec_ctx->framerate.den * ticks) /
2734  }
2735  break;
2736  }
2737  ist->pts = ist->dts;
2738  ist->next_pts = ist->next_dts;
2739  }
2740  for (i = 0; i < nb_output_streams; i++) {
2741  OutputStream *ost = output_streams[i];
2742 
2743  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2744  continue;
2745 
2746  do_streamcopy(ist, ost, pkt);
2747  }
2748 
2749  return !eof_reached;
2750 }
2751 
2752 static void print_sdp(void)
2753 {
2754  char sdp[16384];
2755  int i;
2756  int j;
2757  AVIOContext *sdp_pb;
2758  AVFormatContext **avc;
2759 
2760  for (i = 0; i < nb_output_files; i++) {
2761  if (!output_files[i]->header_written)
2762  return;
2763  }
2764 
2765  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2766  if (!avc)
2767  exit_program(1);
2768  for (i = 0, j = 0; i < nb_output_files; i++) {
2769  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770  avc[j] = output_files[i]->ctx;
2771  j++;
2772  }
2773  }
2774 
2775  if (!j)
2776  goto fail;
2777 
2778  av_sdp_create(avc, j, sdp, sizeof(sdp));
2779 
2780  if (!sdp_filename) {
2781  printf("SDP:\n%s\n", sdp);
2782  fflush(stdout);
2783  } else {
2784  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2786  } else {
2787  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788  avio_closep(&sdp_pb);
2790  }
2791  }
2792 
2793 fail:
2794  av_freep(&avc);
2795 }
2796 
2798 {
2799  InputStream *ist = s->opaque;
2800  const enum AVPixelFormat *p;
2801  int ret;
2802 
2803  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2805  const AVCodecHWConfig *config = NULL;
2806  int i;
2807 
2808  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2809  break;
2810 
2811  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2812  ist->hwaccel_id == HWACCEL_AUTO) {
2813  for (i = 0;; i++) {
2814  config = avcodec_get_hw_config(s->codec, i);
2815  if (!config)
2816  break;
2817  if (!(config->methods &
2819  continue;
2820  if (config->pix_fmt == *p)
2821  break;
2822  }
2823  }
2824  if (config) {
2825  if (config->device_type != ist->hwaccel_device_type) {
2826  // Different hwaccel offered, ignore.
2827  continue;
2828  }
2829 
2830  ret = hwaccel_decode_init(s);
2831  if (ret < 0) {
2832  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2834  "%s hwaccel requested for input stream #%d:%d, "
2835  "but cannot be initialized.\n",
2837  ist->file_index, ist->st->index);
2838  return AV_PIX_FMT_NONE;
2839  }
2840  continue;
2841  }
2842  } else {
2843  const HWAccel *hwaccel = NULL;
2844  int i;
2845  for (i = 0; hwaccels[i].name; i++) {
2846  if (hwaccels[i].pix_fmt == *p) {
2847  hwaccel = &hwaccels[i];
2848  break;
2849  }
2850  }
2851  if (!hwaccel) {
2852  // No hwaccel supporting this pixfmt.
2853  continue;
2854  }
2855  if (hwaccel->id != ist->hwaccel_id) {
2856  // Does not match requested hwaccel.
2857  continue;
2858  }
2859 
2860  ret = hwaccel->init(s);
2861  if (ret < 0) {
2863  "%s hwaccel requested for input stream #%d:%d, "
2864  "but cannot be initialized.\n", hwaccel->name,
2865  ist->file_index, ist->st->index);
2866  return AV_PIX_FMT_NONE;
2867  }
2868  }
2869 
2870  if (ist->hw_frames_ctx) {
2872  if (!s->hw_frames_ctx)
2873  return AV_PIX_FMT_NONE;
2874  }
2875 
2876  ist->hwaccel_pix_fmt = *p;
2877  break;
2878  }
2879 
2880  return *p;
2881 }
2882 
2884 {
2885  InputStream *ist = s->opaque;
2886 
2887  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2888  return ist->hwaccel_get_buffer(s, frame, flags);
2889 
2890  return avcodec_default_get_buffer2(s, frame, flags);
2891 }
2892 
2893 static int init_input_stream(int ist_index, char *error, int error_len)
2894 {
2895  int ret;
2896  InputStream *ist = input_streams[ist_index];
2897 
2898  if (ist->decoding_needed) {
2899  AVCodec *codec = ist->dec;
2900  if (!codec) {
2901  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2902  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2903  return AVERROR(EINVAL);
2904  }
2905 
2906  ist->dec_ctx->opaque = ist;
2907  ist->dec_ctx->get_format = get_format;
2908  ist->dec_ctx->get_buffer2 = get_buffer;
2909  ist->dec_ctx->thread_safe_callbacks = 1;
2910 
2911  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2912  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2913  (ist->decoding_needed & DECODING_FOR_OST)) {
2914  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2916  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2917  }
2918 
2919  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2920 
2921  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2922  * audio, and video decoders such as cuvid or mediacodec */
2923  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2924 
2925  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2926  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2927  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2929  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2930 
2931  ret = hw_device_setup_for_decode(ist);
2932  if (ret < 0) {
2933  snprintf(error, error_len, "Device setup failed for "
2934  "decoder on input stream #%d:%d : %s",
2935  ist->file_index, ist->st->index, av_err2str(ret));
2936  return ret;
2937  }
2938 
2939  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2940  if (ret == AVERROR_EXPERIMENTAL)
2941  abort_codec_experimental(codec, 0);
2942 
2943  snprintf(error, error_len,
2944  "Error while opening decoder for input stream "
2945  "#%d:%d : %s",
2946  ist->file_index, ist->st->index, av_err2str(ret));
2947  return ret;
2948  }
2950  }
2951 
2952  ist->next_pts = AV_NOPTS_VALUE;
2953  ist->next_dts = AV_NOPTS_VALUE;
2954 
2955  return 0;
2956 }
2957 
2959 {
2960  if (ost->source_index >= 0)
2961  return input_streams[ost->source_index];
2962  return NULL;
2963 }
2964 
/* qsort comparator for int64_t values: negative/zero/positive for
 * less-than / equal / greater-than, without overflow risk. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2969 
2970 /* open the muxer when all the streams are initialized */
2971 static int check_init_output_file(OutputFile *of, int file_index)
2972 {
2973  int ret, i;
2974 
2975  for (i = 0; i < of->ctx->nb_streams; i++) {
2976  OutputStream *ost = output_streams[of->ost_index + i];
2977  if (!ost->initialized)
2978  return 0;
2979  }
2980 
2981  of->ctx->interrupt_callback = int_cb;
2982 
2983  ret = avformat_write_header(of->ctx, &of->opts);
2984  if (ret < 0) {
2986  "Could not write header for output file #%d "
2987  "(incorrect codec parameters ?): %s\n",
2988  file_index, av_err2str(ret));
2989  return ret;
2990  }
2991  //assert_avoptions(of->opts);
2992  of->header_written = 1;
2993 
2994  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2995 
2996  if (sdp_filename || want_sdp)
2997  print_sdp();
2998 
2999  /* flush the muxing queues */
3000  for (i = 0; i < of->ctx->nb_streams; i++) {
3001  OutputStream *ost = output_streams[of->ost_index + i];
3002 
3003  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3004  if (!av_fifo_size(ost->muxing_queue))
3005  ost->mux_timebase = ost->st->time_base;
3006 
3007  while (av_fifo_size(ost->muxing_queue)) {
3008  AVPacket pkt;
3009  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3010  write_packet(of, &pkt, ost, 1);
3011  }
3012  }
3013 
3014  return 0;
3015 }
3016 
3018 {
3019  AVBSFContext *ctx;
3020  int i, ret;
3021 
3022  if (!ost->nb_bitstream_filters)
3023  return 0;
3024 
3025  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3026  ctx = ost->bsf_ctx[i];
3027 
3028  ret = avcodec_parameters_copy(ctx->par_in,
3029  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3030  if (ret < 0)
3031  return ret;
3032 
3033  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3034 
3035  ret = av_bsf_init(ctx);
3036  if (ret < 0) {
3037  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3038  ost->bsf_ctx[i]->filter->name);
3039  return ret;
3040  }
3041  }
3042 
3043  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3044  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3045  if (ret < 0)
3046  return ret;
3047 
3048  ost->st->time_base = ctx->time_base_out;
3049 
3050  return 0;
3051 }
3052 
3054 {
3055  OutputFile *of = output_files[ost->file_index];
3056  InputStream *ist = get_input_stream(ost);
3057  AVCodecParameters *par_dst = ost->st->codecpar;
3058  AVCodecParameters *par_src = ost->ref_par;
3059  AVRational sar;
3060  int i, ret;
3061  uint32_t codec_tag = par_dst->codec_tag;
3062 
3063  av_assert0(ist && !ost->filter);
3064 
3065  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3066  if (ret >= 0)
3067  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3068  if (ret < 0) {
3070  "Error setting up codec context options.\n");
3071  return ret;
3072  }
3074 
3075  if (!codec_tag) {
3076  unsigned int codec_tag_tmp;
3077  if (!of->ctx->oformat->codec_tag ||
3078  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3079  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3080  codec_tag = par_src->codec_tag;
3081  }
3082 
3083  ret = avcodec_parameters_copy(par_dst, par_src);
3084  if (ret < 0)
3085  return ret;
3086 
3087  par_dst->codec_tag = codec_tag;
3088 
3089  if (!ost->frame_rate.num)
3090  ost->frame_rate = ist->framerate;
3091  ost->st->avg_frame_rate = ost->frame_rate;
3092 
3094  if (ret < 0)
3095  return ret;
3096 
3097  // copy timebase while removing common factors
3098  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3100 
3101  // copy estimated duration as a hint to the muxer
3102  if (ost->st->duration <= 0 && ist->st->duration > 0)
3103  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3104 
3105  // copy disposition
3106  ost->st->disposition = ist->st->disposition;
3107 
3108  if (ist->st->nb_side_data) {
3109  for (i = 0; i < ist->st->nb_side_data; i++) {
3110  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3111  uint8_t *dst_data;
3112 
3113  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3114  if (!dst_data)
3115  return AVERROR(ENOMEM);
3116  memcpy(dst_data, sd_src->data, sd_src->size);
3117  }
3118  }
3119 
3120  if (ost->rotate_overridden) {
3122  sizeof(int32_t) * 9);
3123  if (sd)
3124  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3125  }
3126 
3127  switch (par_dst->codec_type) {
3128  case AVMEDIA_TYPE_AUDIO:
3129  if (audio_volume != 256) {
3130  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3131  exit_program(1);
3132  }
3133  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3134  par_dst->block_align= 0;
3135  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3136  par_dst->block_align= 0;
3137  break;
3138  case AVMEDIA_TYPE_VIDEO:
3139  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3140  sar =
3141  av_mul_q(ost->frame_aspect_ratio,
3142  (AVRational){ par_dst->height, par_dst->width });
3143  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3144  "with stream copy may produce invalid files\n");
3145  }
3146  else if (ist->st->sample_aspect_ratio.num)
3147  sar = ist->st->sample_aspect_ratio;
3148  else
3149  sar = par_src->sample_aspect_ratio;
3150  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3151  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3152  ost->st->r_frame_rate = ist->st->r_frame_rate;
3153  break;
3154  }
3155 
3156  ost->mux_timebase = ist->st->time_base;
3157 
3158  return 0;
3159 }
3160 
{
    /* set_encoder_id(of, ost): set the output stream's "encoder" metadata tag
     * to the libavcodec ident plus encoder name, unless the user already
     * supplied one or bitexact output was requested.
     * (The function signature line is elided in this source listing.) */
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* respect an explicitly provided "encoder" tag */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* evaluate the muxer's -fflags so +bitexact can be detected */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* likewise for the per-stream codec "flags" option */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    /* "<ident> <name>" + NUL; +2 covers the separator and terminator */
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    /* in bitexact mode only the generic "Lavc" prefix is used so output
     * does not depend on the exact library version */
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                /* NOTE(review): final argument line elided in this source listing */
}
3201 
                                    AVCodecContext *avctx)
{
    /* parse_forced_key_frames: split the comma-separated -force_key_frames
     * specification `kf` into a sorted array of pts values (rescaled into
     * avctx->time_base) stored on ost->forced_kf_pts/forced_kf_count.
     * (The first signature line is elided in this source listing.) */
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* number of entries = number of commas + 1 */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        /* "chapters[+offset]": force one keyframe per chapter start */
        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* grow the array by (nb_chapters - 1), guarding int overflow */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                /* NOTE(review): av_log(...) opening line elided in this source listing */
                    "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            /* plain timestamp entry */
            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3264 
3265 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3266 {
3267  InputStream *ist = get_input_stream(ost);
3268  AVCodecContext *enc_ctx = ost->enc_ctx;
3269  AVFormatContext *oc;
3270 
3271  if (ost->enc_timebase.num > 0) {
3272  enc_ctx->time_base = ost->enc_timebase;
3273  return;
3274  }
3275 
3276  if (ost->enc_timebase.num < 0) {
3277  if (ist) {
3278  enc_ctx->time_base = ist->st->time_base;
3279  return;
3280  }
3281 
3282  oc = output_files[ost->file_index]->ctx;
3283  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3284  }
3285 
3286  enc_ctx->time_base = default_time_base;
3287 }
3288 
{
    /* init_output_stream_encode: configure ost->enc_ctx (time base, frame
     * rate, dimensions, sample/pixel formats, forced keyframes, ...) before
     * the encoder is opened.
     * (Signature line and several interior lines are elided in this source
     * listing; each gap is marked below.) */
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* NOTE(review): declaration of dec_ctx elided in this source listing */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no feeding input stream: derive a default disposition when this is
         * the only stream of its type in the output */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                /* NOTE(review): condition continuation and disposition assignment
                 * elided in this source listing */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame rate selection order: -r, filter, input -r, input r_frame_rate,
         * then a 25 fps fallback */
        if (!ost->frame_rate.num)
            /* NOTE(review): filtergraph frame-rate assignment elided in this source listing */
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): av_log(...) opening line elided in this source listing */
                "No information "
                "about the input framerate is available. Falling "
                "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                "if you want a different framerate.\n",
                ost->file_index, ost->index);
        }
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        if (ost->enc->supported_framerates && !ost->force_fps) {
            /* snap to the nearest frame rate the encoder supports */
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): sample_fmt/sample_rate/channel assignments elided
         * in this source listing */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): init_encoder_time_base(...) call elided in this source listing */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            /* NOTE(review): fallback time_base assignment elided in this source listing */
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            /* NOTE(review): remainder of condition and opening brace elided
             * in this source listing */
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* rescale user-provided forced keyframe timestamps into the final
         * encoder time base */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 /* NOTE(review): source time base argument elided */
                                                 enc_ctx->time_base);

        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* NOTE(review): else-branch (filter sample aspect ratio) elided
             * in this source listing */

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* NOTE(review): body of this branch elided in this source listing */
        }

        /* -top 0/1 maps to bottom/top field first; -1 leaves it untouched */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): av_expr_parse(...) call elided in this source listing */
                if (ret < 0) {
                    /* NOTE(review): av_log(...) opening line elided in this source listing */
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): expression state initialisation elided in this source listing */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): parse_forced_key_frames(...) call elided in this source listing */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* inherit canvas size from the source stream when unset */
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3446 
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    /* Fully initialize one output stream: for encoded streams configure and
     * open the encoder and copy parameters/side data to the muxer stream;
     * for streamcopy copy the input parameters. Then apply -disposition,
     * set up bitstream filters and attempt to write the output file header.
     * On failure a human-readable explanation is written into error.
     * (Several interior lines are elided in this source listing; each gap is
     * marked below.) */
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): subtitle_header allocation line elided in this source listing */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): subtitle_header_size assignment elided in this source listing */
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate when the encoder declares no defaults and
         * the user gave none */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* NOTE(review): condition continuation and hw_frames_ctx reference
             * lines elided in this source listing */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }
        /* subtitle conversion is only supported text->text or bitmap->bitmap */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                /* NOTE(review): avcodec_descriptor_get(...) initializer elided */
            AVCodecDescriptor const *output_descriptor =
                /* NOTE(review): avcodec_descriptor_get(...) initializer elided */
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* NOTE(review): av_buffersink_set_frame_size(...) opening elided
             * in this source listing */
                                         ost->enc_ctx->frame_size);
        /* NOTE(review): one line elided in this source listing */
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                   " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): parameters-from-context call elided in this source listing */
        if (ret < 0) {
            /* NOTE(review): av_log(...) opening line elided in this source listing */
                "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            /* propagate encoder-produced side data to the muxer stream */
            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* autorotation was applied by a filter, so zero the matrix */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        /* NOTE(review): streamcopy initialisation call elided in this source listing */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        /* throwaway AVClass so av_opt_eval_flags can resolve the names */
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3641 
static void report_new_stream(int input_index, AVPacket *pkt)
{
    /* Warn, once per stream, when a packet arrives for an input stream that
     * appeared only after demuxing started. */
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    if (pkt->stream_index < file->nb_streams_warn)
        return; /* already reported for this (or a later) stream index */
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): media-type string argument elided in this source listing */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3656 
static int transcode_init(void)
{
    /* One-time setup before the main transcode loop: bind complex
     * filtergraph outputs to source streams, arm -re rate emulation, open
     * decoders and (non-filtered) encoders, discard unused programs, write
     * headers for streamless outputs and print the stream mapping.
     * Returns 0 on success or a negative error code. */
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* attribute a source_index to outputs fed from single-input complex
     * filtergraphs, for reporting purposes */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* decoder failed: close any already-opened encoders, then fall
             * through to print the mapping and the error */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep the program if at least one of its streams is used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* decoder name == codec name means the native decoder */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): one line elided in this source listing — presumably the
     * statement flagging transcode init as done; verify against upstream */

    return 0;
}
3828 
3829 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3830 static int need_output(void)
3831 {
3832  int i;
3833 
3834  for (i = 0; i < nb_output_streams; i++) {
3835  OutputStream *ost = output_streams[i];
3836  OutputFile *of = output_files[ost->file_index];
3837  AVFormatContext *os = output_files[ost->file_index]->ctx;
3838 
3839  if (ost->finished ||
3840  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3841  continue;
3842  if (ost->frame_number >= ost->max_frames) {
3843  int j;
3844  for (j = 0; j < of->ctx->nb_streams; j++)
3845  close_output_stream(output_streams[of->ost_index + j]);
3846  continue;
3847  }
3848 
3849  return 1;
3850  }
3851 
3852  return 0;
3853 }
3854 
/**
 * Select the output stream to process.
 *
 * Picks the stream whose last muxed DTS (rescaled to AV_TIME_BASE units)
 * is smallest, i.e. the one lagging furthest behind; a not-yet-initialized,
 * still-fed stream is returned immediately, and streams currently marked
 * unavailable are skipped.
 *
 * @return selected output stream, or NULL if none available
 */
{
    /* NOTE(review): function signature line elided in this source listing */
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* streams without a DTS yet sort first (INT64_MIN) */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        if (!ost->initialized && !ost->inputs_done)
            return ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3884 
/* Enable (on != 0) or disable terminal echo on stdin; a no-op where termios
 * is unavailable or stdin's attributes cannot be read. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return; /* not a tty (or error): leave settings alone */

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3896 
static int check_keyboard_interaction(int64_t cur_time)
{
    /* Poll stdin (at most every 100 ms) for interactive commands:
     * q quit, +/- verbosity, s QP histogram, h dump-mode cycling,
     * c/C send filter commands, d/D set debug flags, ? help.
     * Returns AVERROR_EXIT when transcoding should stop, 0 otherwise.
     * (Two av_log opening lines are elided in this source listing.) */
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h'){
        /* cycle: pkt dump -> pkt+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): status av_log(...) line elided in this source listing */
    }
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        /* read one line of user input, echoing it */
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command; 'c' limits it to one filter */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* 'C' with a timestamp: queue for later execution */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): av_log(...) opening line elided in this source listing */
                "Parse error, at least 3 arguments were expected, "
                "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': cycle to the next debug bit, skipping crashing modes */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                    |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                    )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': prompt for an explicit numeric debug value */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
4013 
4014 #if HAVE_THREADS
static void *input_thread(void *arg)
{
    /* Per-input-file reader thread: demuxes packets and forwards them to the
     * main thread through f->in_thread_queue until EOF or error. Any demux
     * error is propagated to the receiving side via set_err_recv. */
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000); /* source not ready; back off briefly */
            continue;
        }
        if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: retry once blocking and warn */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* NOTE(review): av_log(...) opening line elided in this source listing */
                "Thread message queue blocking; consider raising the "
                "thread_queue_size option (current value: %d)\n",
                f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt); /* packet was not consumed by the queue */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4055 
static void free_input_thread(int i)
{
    /* Stop and reap input file i's reader thread: drain any queued packets,
     * join the thread and free the message queue. Safe to call when no
     * thread/queue was ever created for this input. */
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* NOTE(review): one line elided in this source listing — presumably
     * signalling EOF to the sending side before draining; verify upstream */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4071 
4072 static void free_input_threads(void)
4073 {
4074  int i;
4075 
4076  for (i = 0; i < nb_input_files; i++)
4077  free_input_thread(i);
4078 }
4079 
4080 static int init_input_thread(int i)
4081 {
4082  int ret;
4083  InputFile *f = input_files[i];
4084 
4085  if (nb_input_files == 1)
4086  return 0;
4087 
4088  if (f->ctx->pb ? !f->ctx->pb->seekable :
4089  strcmp(f->ctx->iformat->name, "lavfi"))
4090  f->non_blocking = 1;
4091  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4092  f->thread_queue_size, sizeof(AVPacket));
4093  if (ret < 0)
4094  return ret;
4095 
4096  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4097  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4098  av_thread_message_queue_free(&f->in_thread_queue);
4099  return AVERROR(ret);
4100  }
4101 
4102  return 0;
4103 }
4104 
4105 static int init_input_threads(void)
4106 {
4107  int i, ret;
4108 
4109  for (i = 0; i < nb_input_files; i++) {
4110  ret = init_input_thread(i);
4111  if (ret < 0)
4112  return ret;
4113  }
4114  return 0;
4115 }
4116 
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    /* Pull the next demuxed packet from this input's reader-thread queue,
     * without blocking when the input was flagged non-blocking. */
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        /* NOTE(review): flag arguments elided in this source listing */
}
4123 #endif
4124 
{
    /* get_input_packet(f, pkt): fetch the next packet from input file f.
     * Honours -re rate emulation (returns EAGAIN while ahead of realtime)
     * and routes through the reader-thread queue when multiple inputs are
     * active. (Signature line elided in this source listing.) */
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            /* compare the stream position (µs) against wall-clock elapsed */
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN); /* too early: emulate realtime pacing */
        }
    }

#if HAVE_THREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4144 
4145 static int got_eagain(void)
4146 {
4147  int i;
4148  for (i = 0; i < nb_output_streams; i++)
4149  if (output_streams[i]->unavailable)
4150  return 1;
4151  return 0;
4152 }
4153 
4154 static void reset_eagain(void)
4155 {
4156  int i;
4157  for (i = 0; i < nb_input_files; i++)
4158  input_files[i]->eagain = 0;
4159  for (i = 0; i < nb_output_streams; i++)
4160  output_streams[i]->unavailable = 0;
4161 }
4162 
4163 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4164 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4165  AVRational time_base)
4166 {
4167  int ret;
4168 
4169  if (!*duration) {
4170  *duration = tmp;
4171  return tmp_time_base;
4172  }
4173 
4174  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4175  if (ret < 0) {
4176  *duration = tmp;
4177  return tmp_time_base;
4178  }
4179 
4180  return time_base;
4181 }
4182 
{
    /* seek_to_start(ifile, is): rewind input `is` for -stream_loop and
     * accumulate the file's duration so timestamps of the next loop
     * iteration can be offset. Decrements the remaining loop count.
     * (Signature line elided in this source listing.) */
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* first pass: detect whether any audio was actually decoded, since audio
     * gives the most precise last-frame duration */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* last-frame length derived from its sample count */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* no audio: estimate one frame's duration from the frame rate */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1; /* last resort: one time-base unit */
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4240 
4241 /*
4242  * Return
4243  * - 0 -- one packet was read and processed
4244  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4245  * this function should be called again
4246  * - AVERROR_EOF -- this function should not be called again
4247  */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    /* no packet available right now: flag the file and retry later */
    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* read failure (typically EOF) while -stream_loop is active:
     * drain the decoders, rewind the file, and read again */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                /* flush with no_eof=1; a positive return means a frame
                 * was still produced, so report progress to the caller */
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        /* the demux thread must be stopped across the seek */
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    /* terminal error or EOF: flush decoders, finish dependent outputs,
     * and mark the file as fully consumed */
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        /* EAGAIN rather than EOF: other input files may still have data */
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* per-stream statistics */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
        exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* undo timestamp wraparound for formats whose pts counter is
     * narrower than 64 bits (only once per stream) */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* a dts/pts past the half-way point of the wrap range is assumed
         * to have wrapped; correction stays pending until both did */
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't clobber side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the per-file offset, then the user's -itsscale factor */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* inter-stream discontinuity: first packet of this stream is far from
     * the last timestamp seen on the whole file */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
            delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* when looping, shift timestamps by the accumulated file duration */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* intra-stream discontinuity: dts jumps away from the predicted
     * next_dts.  For TS_DISCONT formats the offset is corrected; for
     * others an out-of-range dts/pts is dropped instead */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* remember the last dts (in AV_TIME_BASE units) for the
     * inter-stream discontinuity check above */
    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* keep subtitle-to-video rendering in sync with this stream */
    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4512 
4513 /**
4514  * Perform a step of transcoding for the specified filter graph.
4515  *
4516  * @param[in] graph filter graph to consider
4517  * @param[out] best_ist input stream where a frame would allow to continue
4518  * @return 0 for success, <0 for error
4519  */
4520 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4521 {
4522  int i, ret;
4523  int nb_requests, nb_requests_max = 0;
4524  InputFilter *ifilter;
4525  InputStream *ist;
4526 
4527  *best_ist = NULL;
4528  ret = avfilter_graph_request_oldest(graph->graph);
4529  if (ret >= 0)
4530  return reap_filters(0);
4531 
4532  if (ret == AVERROR_EOF) {
4533  ret = reap_filters(1);
4534  for (i = 0; i < graph->nb_outputs; i++)
4535  close_output_stream(graph->outputs[i]->ost);
4536  return ret;
4537  }
4538  if (ret != AVERROR(EAGAIN))
4539  return ret;
4540 
4541  for (i = 0; i < graph->nb_inputs; i++) {
4542  ifilter = graph->inputs[i];
4543  ist = ifilter->ist;
4544  if (input_files[ist->file_index]->eagain ||
4545  input_files[ist->file_index]->eof_reached)
4546  continue;
4547  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4548  if (nb_requests > nb_requests_max) {
4549  nb_requests_max = nb_requests;
4550  *best_ist = ist;
4551  }
4552  }
4553 
4554  if (!*best_ist)
4555  for (i = 0; i < graph->nb_outputs; i++)
4556  graph->outputs[i]->ost->unavailable = 1;
4557 
4558  return 0;
4559 }
4560 
4561 /**
4562  * Run a single step of transcoding.
4563  *
4564  * @return 0 for success, <0 for error
4565  */
4566 static int transcode_step(void)
4567 {
4568  OutputStream *ost;
4569  InputStream *ist = NULL;
4570  int ret;
4571 
4572  ost = choose_output();
4573  if (!ost) {
4574  if (got_eagain()) {
4575  reset_eagain();
4576  av_usleep(10000);
4577  return 0;
4578  }
4579  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4580  return AVERROR_EOF;
4581  }
4582 
4583  if (ost->filter && !ost->filter->graph->graph) {
4585  ret = configure_filtergraph(ost->filter->graph);
4586  if (ret < 0) {
4587  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4588  return ret;
4589  }
4590  }
4591  }
4592 
4593  if (ost->filter && ost->filter->graph->graph) {
4594  if (!ost->initialized) {
4595  char error[1024] = {0};
4596  ret = init_output_stream(ost, error, sizeof(error));
4597  if (ret < 0) {
4598  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4599  ost->file_index, ost->index, error);
4600  exit_program(1);
4601  }
4602  }
4603  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4604  return ret;
4605  if (!ist)
4606  return 0;
4607  } else if (ost->filter) {
4608  int i;
4609  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4610  InputFilter *ifilter = ost->filter->graph->inputs[i];
4611  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4612  ist = ifilter->ist;
4613  break;
4614  }
4615  }
4616  if (!ist) {
4617  ost->inputs_done = 1;
4618  return 0;
4619  }
4620  } else {
4621  av_assert0(ost->source_index >= 0);
4622  ist = input_streams[ost->source_index];
4623  }
4624 
4625  ret = process_input(ist->file_index);
4626  if (ret ==