FFmpeg — ffmpeg.c source listing (exported from the project's Doxygen documentation; line numbers embedded in the text are from that export).
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identification consumed by cmdutils' banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Destination for per-frame video statistics (-vstats / -vstats_file);
   closed in ffmpeg_cleanup(). */
static FILE *vstats_file;

/* Variable names usable in the -force_key_frames expression; the table
   must stay NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
/**
 * Snapshot of wall-clock, user-CPU and system-CPU time, all in
 * microseconds; used by the -benchmark/-benchmark_all reporting
 * (see current_time / update_benchmark below).
 *
 * NOTE(review): the closing "} BenchmarkTimeStamps;" line was lost in
 * extraction; restored here so the typedef compiles again.
 */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec; ///< wall-clock time
    int64_t user_usec; ///< CPU time spent in user mode
    int64_t sys_usec;  ///< CPU time spent in kernel mode
} BenchmarkTimeStamps;
/* Forward declarations; definitions appear later in the file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;        /* nonzero: no terminal interaction */
static int nb_frames_dup = 0;        /* running count of duplicated frames */
static unsigned dup_warning = 1000;  /* next dup count that triggers a warning */
static int nb_frames_drop = 0;       /* running count of dropped frames */
static int64_t decode_error_stat[2]; /* decode outcome counters (semantics set
                                        by the decode path, not visible here) */

static int want_sdp = 1;

/* NOTE(review): several file-scope declarations were lost in extraction
   here (the original numbering jumps over lines 142-159) — among them the
   objects referenced later in this file, e.g. current_time,
   transcode_init_done, subtitle_out and the input/output stream arrays.
   Restore them from the original source before building. */
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* terminal state saved by term_init() */
static int restore_tty;         /* nonzero once oldtty holds a valid state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas frame, stamped with @p pts, into
   every buffer source this input stream feeds.  AVERROR_EOF from a
   source is tolerated (the filtergraph may already be finished). */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* NOTE(review): the flag arguments of this call were lost in
           extraction (upstream passes KEEP_REF|PUSH) — restore before
           building. */
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Async-signal-safe part of terminal teardown: restore the tty state
 * saved by term_init(), if we changed it. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
327 
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
/* Signal state shared with the handlers below; volatile because it is
   written from signal context and read from the main loop. */
static volatile int received_sigterm = 0;    /* last termination signal, or 0 */
static volatile int received_nb_signals = 0; /* how many signals arrived */
/* NOTE(review): a declaration between these two was lost in extraction
   (the transcode_init_done atomic referenced in ffmpeg_cleanup()). */
static volatile int ffmpeg_exited = 0;       /* set once cleanup completed */
static int main_return_code = 0;             /* process exit status */
339 
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll for one keypress without blocking.
 * Returns the character read, or -1 (or a read() error code) when no
 * input is available on the platform's console/stdin. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set readable;
    int rc;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    timeout.tv_sec  = 0;   /* pure poll: do not wait at all */
    timeout.tv_usec = 0;
    rc = select(1, &readable, NULL, NULL, &timeout);
    if (rc > 0) {
        rc = read(0, &ch, 1);
        return rc == 1 ? ch : rc;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
/* AVIOInterruptCallback body: tells libavformat to abort blocking I/O
   once a termination signal has been received.
   NOTE(review): the return statement was lost in extraction (upstream
   returns received_nb_signals compared against transcode_init_done) —
   restore before building; as shown the function has no return value. */
static int decode_interrupt_cb(void *ctx)
{
}
480 
482 
/* Global teardown: free filtergraphs, close output files, release
   per-stream state, close inputs, then report final status.  ret is the
   exit code being propagated (used only for the failure message).
   NOTE(review): several individual lines of this function were lost in
   extraction; each spot is marked inline below — restore them from the
   original source before building. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): line lost in extraction here. */
        for (j = 0; j < fg->nb_inputs; j++) {
            /* drain any frames still queued for this filter input */
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                /* drain queued subtitles the same way */
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): first line of this av_fifo_generic_read()
                       call was lost in extraction. */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                /* NOTE(review): line lost in extraction here. */
            }
            /* NOTE(review): line lost in extraction here. */
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* NOTE(review): line lost in extraction here. */

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE(review): the declaration of s ("AVFormatContext *s;") was
           lost in extraction — it is assigned just below. */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        /* NOTE(review): line lost in extraction here. */
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        /* NOTE(review): line lost in extraction here. */
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        /* NOTE(review): line lost in extraction here. */
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        /* NOTE(review): lines lost in extraction here. */

        if (ost->muxing_queue) {
            /* drop packets still waiting for the muxer */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            /* NOTE(review): line lost in extraction here. */
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): lines lost in extraction here. */
        av_dict_free(&ist->decoder_opts);
        /* NOTE(review): lines lost in extraction here. */
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        /* NOTE(review): line lost in extraction here. */

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the first line of this av_log() call was lost
               in extraction. */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }
    /* NOTE(review): line lost in extraction here. */

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    /* NOTE(review): line lost in extraction here. */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;   /* lets the Windows CtrlHandler stop stalling */
}
635 
/* Iterate the entries of dictionary b.
   NOTE(review): the signature line and the loop body were lost in
   extraction (upstream this is remove_avoptions(), which deletes each of
   b's keys from another dictionary) — restore before building. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
644 
/* Abort the program if any user-supplied option in m was not consumed.
   NOTE(review): the signature line and the declaration of t were lost in
   extraction (upstream: assert_avoptions(AVDictionary *m) with
   "AVDictionaryEntry *t;") — restore before building. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
653 
/* Terminate the program after an experimental codec was selected without
   being explicitly enabled.  Both parameters are unused here; they exist
   for the call sites' benefit. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
658 
/* With -benchmark_all, log the user/sys/real CPU time consumed since the
   previous call, labelled by the printf-style fmt arguments, and advance
   the current_time snapshot.
   NOTE(review): the declaration/initialization of t (a
   BenchmarkTimeStamps snapshot) and the first line of the av_log() call
   were lost in extraction — restore before building. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.user_usec - current_time.user_usec,
            t.sys_usec - current_time.sys_usec,
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
679 
/* Mark every output stream finished: the stream equal to ost receives
   this_stream's flags, every other stream receives others' flags.
   NOTE(review): the signature line was lost in extraction (upstream:
   close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
   OSTFinished others)) — restore before building. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
688 
/* Fix up timestamps on one packet and hand it to the muxer, or buffer it
   in the stream's muxing queue if the muxer header has not been written
   yet.  unqueue is nonzero when the packet is being replayed from that
   queue, so it is not counted against -frames a second time.
   NOTE(review): a few lines of this function were lost in extraction;
   each spot is marked inline below. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow geometrically, bounded by -max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): the first line of this av_log() call was
                   lost in extraction. */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* NOTE(review): the condition guarding this timestamp reset was lost
       in extraction — as shown, the reset is unconditional. */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the first line of the side-data lookup that
           declares sd was lost in extraction. */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* sum - min - max: both timestamps become the median of the
               three candidates */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the opening line(s) of this condition were lost
           in extraction. */
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict formats require strictly increasing DTS */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type argument line was lost in
                  extraction. */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
        );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): line lost in extraction here. */
    }
    av_packet_unref(pkt);
}
823 
825 {
826  OutputFile *of = output_files[ost->file_index];
827 
828  ost->finished |= ENCODER_FINISHED;
829  if (of->shortest) {
830  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831  of->recording_time = FFMIN(of->recording_time, end);
832  }
833 }
834 
835 /*
836  * Send a single packet to the output, applying any bitstream filters
837  * associated with the output stream. This may result in any number
838  * of packets actually being written, depending on what bitstream
839  * filters are applied. The supplied packet is consumed and will be
840  * blank (as if newly-allocated) when this function returns.
841  *
842  * If eof is set, instead indicate EOF to all bitstream filters and
843  * therefore flush any delayed packets to the output. A blank packet
844  * must be supplied in this case.
845  */
847  OutputStream *ost, int eof)
848 {
849  int ret = 0;
850 
851  /* apply the output bitstream filters, if any */
852  if (ost->nb_bitstream_filters) {
853  int idx;
854 
855  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
856  if (ret < 0)
857  goto finish;
858 
859  eof = 0;
860  idx = 1;
861  while (idx) {
862  /* get a packet from the previous filter up the chain */
863  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864  if (ret == AVERROR(EAGAIN)) {
865  ret = 0;
866  idx--;
867  continue;
868  } else if (ret == AVERROR_EOF) {
869  eof = 1;
870  } else if (ret < 0)
871  goto finish;
872 
873  /* send it to the next filter down the chain or to the muxer */
874  if (idx < ost->nb_bitstream_filters) {
875  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
878  idx++;
879  eof = 0;
880  } else if (eof)
881  goto finish;
882  else
883  write_packet(of, pkt, ost, 0);
884  }
885  } else if (!eof)
886  write_packet(of, pkt, ost, 0);
887 
888 finish:
889  if (ret < 0 && ret != AVERROR_EOF) {
890  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
892  if(exit_on_error)
893  exit_program(1);
894  }
895 }
896 
898 {
899  OutputFile *of = output_files[ost->file_index];
900 
901  if (of->recording_time != INT64_MAX &&
903  AV_TIME_BASE_Q) >= 0) {
904  close_output_stream(ost);
905  return 0;
906  }
907  return 1;
908 }
909 
911  AVFrame *frame)
912 {
913  AVCodecContext *enc = ost->enc_ctx;
914  AVPacket pkt;
915  int ret;
916 
917  av_init_packet(&pkt);
918  pkt.data = NULL;
919  pkt.size = 0;
920 
921  if (!check_recording_time(ost))
922  return;
923 
924  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925  frame->pts = ost->sync_opts;
926  ost->sync_opts = frame->pts + frame->nb_samples;
927  ost->samples_encoded += frame->nb_samples;
928  ost->frames_encoded++;
929 
930  av_assert0(pkt.size || !pkt.data);
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936  enc->time_base.num, enc->time_base.den);
937  }
938 
939  ret = avcodec_send_frame(enc, frame);
940  if (ret < 0)
941  goto error;
942 
943  while (1) {
944  ret = avcodec_receive_packet(enc, &pkt);
945  if (ret == AVERROR(EAGAIN))
946  break;
947  if (ret < 0)
948  goto error;
949 
950  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
951 
952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
953 
954  if (debug_ts) {
955  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
959  }
960 
961  output_packet(of, &pkt, ost, 0);
962  }
963 
964  return;
965 error:
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968 }
969 
/* Encode one subtitle and mux it.  DVB subtitles are encoded twice: the
   first packet draws the rectangles, the second (with num_rects forced
   to 0) clears them.  Note that sub is mutated in place (pts/display
   times), which is why num_rects is saved and restored per pass. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;  /* fixed encode-buffer size */
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* pass 2 zeroes num_rects below; remember it so it can be restored */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1052 
1053 static void do_video_out(OutputFile *of,
1054  OutputStream *ost,
1055  AVFrame *next_picture,
1056  double sync_ipts)
1057 {
1058  int ret, format_video_sync;
1059  AVPacket pkt;
1060  AVCodecContext *enc = ost->enc_ctx;
1061  AVCodecParameters *mux_par = ost->st->codecpar;
1062  AVRational frame_rate;
1063  int nb_frames, nb0_frames, i;
1064  double delta, delta0;
1065  double duration = 0;
1066  int frame_size = 0;
1067  InputStream *ist = NULL;
1069 
1070  if (ost->source_index >= 0)
1071  ist = input_streams[ost->source_index];
1072 
1073  frame_rate = av_buffersink_get_frame_rate(filter);
1074  if (frame_rate.num > 0 && frame_rate.den > 0)
1075  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1076 
1077  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1079 
1080  if (!ost->filters_script &&
1081  !ost->filters &&
1082  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1083  next_picture &&
1084  ist &&
1085  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1086  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1087  }
1088 
1089  if (!next_picture) {
1090  //end, flushing
1091  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1092  ost->last_nb0_frames[1],
1093  ost->last_nb0_frames[2]);
1094  } else {
1095  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1096  delta = delta0 + duration;
1097 
1098  /* by default, we output a single frame */
1099  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1100  nb_frames = 1;
1101 
1102  format_video_sync = video_sync_method;
1103  if (format_video_sync == VSYNC_AUTO) {
1104  if(!strcmp(of->ctx->oformat->name, "avi")) {
1105  format_video_sync = VSYNC_VFR;
1106  } else
1107  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108  if ( ist
1109  && format_video_sync == VSYNC_CFR
1110  && input_files[ist->file_index]->ctx->nb_streams == 1
1111  && input_files[ist->file_index]->input_ts_offset == 0) {
1112  format_video_sync = VSYNC_VSCFR;
1113  }
1114  if (format_video_sync == VSYNC_CFR && copy_ts) {
1115  format_video_sync = VSYNC_VSCFR;
1116  }
1117  }
1118  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1119 
1120  if (delta0 < 0 &&
1121  delta > 0 &&
1122  format_video_sync != VSYNC_PASSTHROUGH &&
1123  format_video_sync != VSYNC_DROP) {
1124  if (delta0 < -0.6) {
1125  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126  } else
1127  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1128  sync_ipts = ost->sync_opts;
1129  duration += delta0;
1130  delta0 = 0;
1131  }
1132 
1133  switch (format_video_sync) {
1134  case VSYNC_VSCFR:
1135  if (ost->frame_number == 0 && delta0 >= 0.5) {
1136  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1137  delta = duration;
1138  delta0 = 0;
1139  ost->sync_opts = lrint(sync_ipts);
1140  }
1141  case VSYNC_CFR:
1142  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1143  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144  nb_frames = 0;
1145  } else if (delta < -1.1)
1146  nb_frames = 0;
1147  else if (delta > 1.1) {
1148  nb_frames = lrintf(delta);
1149  if (delta0 > 1.1)
1150  nb0_frames = lrintf(delta0 - 0.6);
1151  }
1152  break;
1153  case VSYNC_VFR:
1154  if (delta <= -0.6)
1155  nb_frames = 0;
1156  else if (delta > 0.6)
1157  ost->sync_opts = lrint(sync_ipts);
1158  break;
1159  case VSYNC_DROP:
1160  case VSYNC_PASSTHROUGH:
1161  ost->sync_opts = lrint(sync_ipts);
1162  break;
1163  default:
1164  av_assert0(0);
1165  }
1166  }
1167 
1168  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1169  nb0_frames = FFMIN(nb0_frames, nb_frames);
1170 
1171  memmove(ost->last_nb0_frames + 1,
1172  ost->last_nb0_frames,
1173  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1174  ost->last_nb0_frames[0] = nb0_frames;
1175 
1176  if (nb0_frames == 0 && ost->last_dropped) {
1177  nb_frames_drop++;
1179  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1180  ost->frame_number, ost->st->index, ost->last_frame->pts);
1181  }
1182  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1183  if (nb_frames > dts_error_threshold * 30) {
1184  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1185  nb_frames_drop++;
1186  return;
1187  }
1188  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1189  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1190  if (nb_frames_dup > dup_warning) {
1191  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1192  dup_warning *= 10;
1193  }
1194  }
1195  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 
1197  /* duplicates frame if needed */
1198  for (i = 0; i < nb_frames; i++) {
1199  AVFrame *in_picture;
1200  int forced_keyframe = 0;
1201  double pts_time;
1202  av_init_packet(&pkt);
1203  pkt.data = NULL;
1204  pkt.size = 0;
1205 
1206  if (i < nb0_frames && ost->last_frame) {
1207  in_picture = ost->last_frame;
1208  } else
1209  in_picture = next_picture;
1210 
1211  if (!in_picture)
1212  return;
1213 
1214  in_picture->pts = ost->sync_opts;
1215 
1216  if (!check_recording_time(ost))
1217  return;
1218 
1220  ost->top_field_first >= 0)
1221  in_picture->top_field_first = !!ost->top_field_first;
1222 
1223  if (in_picture->interlaced_frame) {
1224  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1225  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1226  else
1227  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1228  } else
1229  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1230 
1231  in_picture->quality = enc->global_quality;
1232  in_picture->pict_type = 0;
1233 
1234  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1235  in_picture->pts != AV_NOPTS_VALUE)
1236  ost->forced_kf_ref_pts = in_picture->pts;
1237 
1238  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1240  if (ost->forced_kf_index < ost->forced_kf_count &&
1241  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242  ost->forced_kf_index++;
1243  forced_keyframe = 1;
1244  } else if (ost->forced_keyframes_pexpr) {
1245  double res;
1246  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1249  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1255  res);
1256  if (res) {
1257  forced_keyframe = 1;
1263  }
1264 
1266  } else if ( ost->forced_keyframes
1267  && !strncmp(ost->forced_keyframes, "source", 6)
1268  && in_picture->key_frame==1) {
1269  forced_keyframe = 1;
1270  }
1271 
1272  if (forced_keyframe) {
1273  in_picture->pict_type = AV_PICTURE_TYPE_I;
1274  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1275  }
1276 
1278  if (debug_ts) {
1279  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282  enc->time_base.num, enc->time_base.den);
1283  }
1284 
1285  ost->frames_encoded++;
1286 
1287  ret = avcodec_send_frame(enc, in_picture);
1288  if (ret < 0)
1289  goto error;
1290  // Make sure Closed Captions will not be duplicated
1292 
1293  while (1) {
1294  ret = avcodec_receive_packet(enc, &pkt);
1295  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296  if (ret == AVERROR(EAGAIN))
1297  break;
1298  if (ret < 0)
1299  goto error;
1300 
1301  if (debug_ts) {
1302  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1306  }
1307 
1308  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309  pkt.pts = ost->sync_opts;
1310 
1311  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1312 
1313  if (debug_ts) {
1314  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1318  }
1319 
1320  frame_size = pkt.size;
1321  output_packet(of, &pkt, ost, 0);
1322 
1323  /* if two pass, output log */
1324  if (ost->logfile && enc->stats_out) {
1325  fprintf(ost->logfile, "%s", enc->stats_out);
1326  }
1327  }
1328  ost->sync_opts++;
1329  /*
1330  * For video, number of frames in == number of packets out.
1331  * But there may be reordering, so we can't throw away frames on encoder
1332  * flush, we need to limit them here, before they go into encoder.
1333  */
1334  ost->frame_number++;
1335 
1336  if (vstats_filename && frame_size)
1337  do_video_stats(ost, frame_size);
1338  }
1339 
1340  if (!ost->last_frame)
1341  ost->last_frame = av_frame_alloc();
1342  av_frame_unref(ost->last_frame);
1343  if (next_picture && ost->last_frame)
1344  av_frame_ref(ost->last_frame, next_picture);
1345  else
1346  av_frame_free(&ost->last_frame);
1347 
1348  return;
1349 error:
1350  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1351  exit_program(1);
1352 }
1353 
1354 static double psnr(double d)
1355 {
1356  return -10.0 * log10(d);
1357 }
1358 
/* Append one line of per-frame statistics for a video stream to the
 * file given with -vstats_file; the file is opened lazily on first use.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably do_video_stats(OutputStream *ost, int frame_size); confirm
 * against the full source. */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 2 adds output-file and stream indices to each line */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* PSNR is derived from the accumulated squared error, normalized
         * by frame area times the squared 8-bit full-scale value */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid division by ~0 in avg_bitrate */

        /* instantaneous bitrate from this frame, average from total bytes */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1401 
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1403 
/* Mark an output stream (and, with -shortest, all sibling streams of its
 * output file) as finished for both encoder and muxer.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably finish_output_stream(OutputStream *ost) — and the statement
 * that marks 'ost' itself finished appears to be on a line dropped by the
 * extraction; confirm against the full source. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;


    /* with -shortest, the first stream to finish terminates the whole
     * output file, so every stream of this file is marked done */
    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1416 
1417 /**
1418  * Get and encode new output from any of the filtergraphs, without causing
1419  * activity.
1420  *
1421  * @return 0 for success, <0 for severe errors
1422  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of 'filter' appears to be on a line
         * dropped by this extraction (likely AVFilterContext *filter;) */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not (yet) configured */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* lazily initialize the output stream the first time data arrives */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* reuse a per-stream frame to avoid reallocating on every reap */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        /* drain the buffersink until it has nothing more to give */
        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NOTE(review): the flags argument of this call is on a line
             * dropped by the extraction */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                        /* on flush at EOF, push a NULL frame so the video
                         * path can emit any pending duplicated frames */
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator so float_pts keeps extra
                 * fractional precision relative to filtered_frame->pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                /* integer pts in the encoder timebase, offset by start_time */
                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                /* without a forced aspect ratio, propagate the filter's SAR */
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                /* reject mid-stream channel-count changes the encoder
                 * cannot handle */
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1526 
/* Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and verbose per-stream demux/mux statistics. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0; /* negative means "overhead unknown" */
    int i, j;
    int pass1_used = 1;

    /* accumulate bytes written per media type across all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
            /* NOTE(review): the condition guarding this assignment is on
             * line(s) dropped by this extraction (pass-1 flag check) */
            pass1_used = 0;
    }

    /* muxing overhead: container bytes beyond the raw coded payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0; /* shadows parameter on purpose */

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* warn when nothing at all was written to any output */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1643 
/* Emit the periodic (or final) progress line — frame count, fps, q, size,
 * time, bitrate, dup/drop, speed — to stderr/log, and a machine-readable
 * key=value version to -progress output if enabled. Rate-limited to one
 * update per 500ms unless this is the last report. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1; /* running max of output end timestamps */
    static int64_t last_time = -1; /* persists across calls for rate limiting */
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* throttle intermediate reports to at most one per 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0; /* wall-clock seconds elapsed */


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the av_bprint_init() for 'buf' appears to be on a line
     * dropped by this extraction */
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* additional video streams after the first only report q */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* the first video stream drives frame/fps/q and optional PSNR */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* one hex digit per bucket, log2-compressed counts */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4; /* chroma planes are quarter-size (4:2:0) */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
            /* NOTE(review): the guard for this statement is on a line
             * dropped by this extraction */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* break the max end-timestamp into h:m:s.us for display */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

        /* NOTE(review): the guard for this statement is on a line dropped
         * by this extraction (dup/drop only shown when non-zero) */
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* '\r' rewrites the status line in place; '\n' finishes it */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1842 
/* Fill an input filter's parameters from a stream's codec parameters when
 * no decoded frame was ever received (demuxer-provided values only).
 * NOTE(review): the signature line is not visible in this extract —
 * presumably ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par); confirm against the full source. */
{
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1855 
/* Drain all encoders at end of input: configure any stream that never
 * received data, then send a NULL frame to each encoder and mux every
 * packet it still has buffered, until AVERROR_EOF. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

                   /* NOTE(review): the av_log() call opening is on a line
                    * dropped by this extraction */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* fall back to demuxer parameters for inputs that never
                 * produced a decoded frame */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                    /* NOTE(review): the condition guarding this 'continue'
                     * is on a line dropped by this extraction */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* PCM-style audio encoders (frame_size <= 1) buffer nothing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

            /* NOTE(review): the condition guarding this 'continue' is on a
             * line dropped by this extraction */
            continue;

        /* drain loop: send NULL (flush) frames until a packet comes out */
        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;


            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* two-pass: append the encoder's pass-1 stats to the log file */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* signal EOF to the muxer for this stream */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
                /* NOTE(review): the condition guarding this call is on a
                 * line dropped by this extraction (vstats for video) */
                do_video_stats(ost, pkt_size);
        }
    }
}
1971 
1972 /*
1973  * Check whether a packet from ist should be written into ost at this time
1974  */
/* Decide whether a packet from input stream 'ist' should be written into
 * output stream 'ost' right now. Returns 1 to write, 0 to skip.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably check_output_constraints(InputStream *ist, OutputStream *ost);
 * confirm against the full source. */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* the output stream must be fed by exactly this input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* respect the output file's -ss start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1991 
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * apply start-time/recording-time limits, rescale timestamps into the
 * muxer timebase, and hand the packet to output_packet(). A NULL 'pkt'
 * flushes the output bitstream filters. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt = { 0 };

    av_init_packet(&opkt);

    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* drop leading non-keyframes so the copied stream starts decodable;
     * NOTE(review): the second half of this condition is on a line dropped
     * by this extraction */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    /* drop packets before the requested start point unless
     * -copypriorss asked to keep them */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output-file -t limit reached: close this stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* input-file -t limit reached: close this stream */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    /* rescale pts/dts into the muxer timebase, offset by the start time */
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    /* audio: rescale via sample counts to avoid accumulating rounding
     * error across packets */
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration/initialization of 'duration' is on
         * a line dropped by this extraction */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;

    /* share the payload by reference when possible; abort on OOM */
    if (pkt->buf) {
        opkt.buf = av_buffer_ref(pkt->buf);
        if (!opkt.buf)
            exit_program(1);
    }
    opkt.data = pkt->data;
    opkt.size = pkt->size;

    av_copy_packet_side_data(&opkt, pkt);

    output_packet(of, &opkt, ost, 0);
}
2078 
/* Try to pick a channel layout for an input audio stream that did not
 * declare one. Returns 1 on success (or if a layout was already set),
 * 0 when no layout could be chosen.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably guess_input_channel_layout(InputStream *ist) — and the
 * statement that actually assigns the guessed layout appears to be on a
 * line dropped by the extraction; confirm against the full source. */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* -guess_layout_max caps how many channels we are willing to guess */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2098 
/* Update global decode-error statistics after a decode attempt, and honor
 * -xerror by aborting on decode errors or corrupt output frames. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* index 0 counts successful decodes, index 1 counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
            /* NOTE(review): the corrupt-frame check and the av_log() call
             * opening are on lines dropped by this extraction */
            "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
        }
    }
}
2116 
2117 // Filters can be configured only if the formats of all inputs are known.
/* Return 1 when every audio/video input of the filtergraph has a known
 * format (a prerequisite for configuring the graph), 0 otherwise.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably ifilter_has_all_input_formats(FilterGraph *fg); confirm
 * against the full source. */
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        /* format < 0 means "not yet known"; only audio/video inputs block
         * configuration */
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2128 
/* Feed a decoded frame into an input filter, reconfiguring the filtergraph
 * first when the frame's parameters differ from what the graph was built
 * for. Frames arriving before all inputs have known formats are queued.
 * NOTE(review): the signature line is not visible in this extract —
 * presumably ifilter_send_frame(InputFilter *ifilter, AVFrame *frame);
 * confirm against the full source. */
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* -reinit_filter 0: keep the existing graph despite parameter changes */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* any change in hw-frames context also forces a reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* not all inputs known yet: queue a clone of the frame,
                 * growing the FIFO as needed */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* drain pending output from the old graph before rebuilding it */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the av_buffersrc_add_frame_flags() call that sets 'ret'
     * here is on a line dropped by this extraction */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2205 
2206 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2207 {
2208  int ret;
2209 
2210  ifilter->eof = 1;
2211 
2212  if (ifilter->filter) {
2213  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2214  if (ret < 0)
2215  return ret;
2216  } else {
2217  // the filtergraph was never configured
2218  if (ifilter->format < 0)
2219  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2220  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2221  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2222  return AVERROR_INVALIDDATA;
2223  }
2224  }
2225 
2226  return 0;
2227 }
2228 
2229 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2230 // There is the following difference: if you got a frame, you must call
2231 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2232 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2233 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2234 {
2235  int ret;
2236 
2237  *got_frame = 0;
2238 
2239  if (pkt) {
2240  ret = avcodec_send_packet(avctx, pkt);
2241  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2242  // decoded frames with avcodec_receive_frame() until done.
2243  if (ret < 0 && ret != AVERROR_EOF)
2244  return ret;
2245  }
2246 
2247  ret = avcodec_receive_frame(avctx, frame);
2248  if (ret < 0 && ret != AVERROR(EAGAIN))
2249  return ret;
2250  if (ret >= 0)
2251  *got_frame = 1;
2252 
2253  return 0;
2254 }
2255 
/* Send one decoded frame to all filtergraph inputs fed by this stream.
 * All but the last filter receive a reference (filter_frame); the last one
 * consumes decoded_frame itself. AVERROR_EOF from a filter is ignored.
 * NOTE(review): the av_log(..., opening before the "Failed to inject"
 * message (original line 2274) is missing from this extract. */
2256 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2257 {
2258  int i, ret;
2259  AVFrame *f;
2260 
2261  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2262  for (i = 0; i < ist->nb_filters; i++) {
2263  if (i < ist->nb_filters - 1) {
2264  f = ist->filter_frame;
2265  ret = av_frame_ref(f, decoded_frame);
2266  if (ret < 0)
2267  break;
2268  } else
2269  f = decoded_frame;
2270  ret = ifilter_send_frame(ist->filters[i], f);
2271  if (ret == AVERROR_EOF)
2272  ret = 0; /* ignore */
2273  if (ret < 0) {
2275  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2276  break;
2277  }
2278  }
2279  return ret;
2280 }
2281 
/* Decode one audio packet, fix up its timestamps, and forward the decoded
 * frame to the filtergraphs. Advances next_pts/next_dts by the decoded
 * duration for inputs lacking timestamps or carrying several frames per
 * packet. NOTE(review): original lines 2296 and 2339 are missing from this
 * extract (presumably the update_benchmark(NULL) call and the
 * av_frame_unref(ist->filter_frame) — confirm against upstream). */
2282 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2283  int *decode_failed)
2284 {
2285  AVFrame *decoded_frame;
2286  AVCodecContext *avctx = ist->dec_ctx;
2287  int ret, err = 0;
2288  AVRational decoded_frame_tb;
2289 
2290  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2291  return AVERROR(ENOMEM);
2292  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2293  return AVERROR(ENOMEM);
2294  decoded_frame = ist->decoded_frame;
2295 
2297  ret = decode(avctx, decoded_frame, got_output, pkt);
2298  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2299  if (ret < 0)
2300  *decode_failed = 1;
2301 
2302  if (ret >= 0 && avctx->sample_rate <= 0) {
2303  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2304  ret = AVERROR_INVALIDDATA;
2305  }
2306 
2307  if (ret != AVERROR_EOF)
2308  check_decode_result(ist, got_output, ret);
2309 
2310  if (!*got_output || ret < 0)
2311  return ret;
2312 
2313  ist->samples_decoded += decoded_frame->nb_samples;
2314  ist->frames_decoded++;
2315 
2316  /* increment next_dts to use for the case where the input stream does not
2317  have timestamps or there are multiple frames in the packet */
2318  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2319  avctx->sample_rate;
2320  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2321  avctx->sample_rate;
2322 
     /* Choose a pts source: frame pts, then packet pts, then stream dts. */
2323  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324  decoded_frame_tb = ist->st->time_base;
2325  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326  decoded_frame->pts = pkt->pts;
2327  decoded_frame_tb = ist->st->time_base;
2328  }else {
2329  decoded_frame->pts = ist->dts;
2330  decoded_frame_tb = AV_TIME_BASE_Q;
2331  }
2332  if (decoded_frame->pts != AV_NOPTS_VALUE)
2333  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335  (AVRational){1, avctx->sample_rate});
2336  ist->nb_samples = decoded_frame->nb_samples;
2337  err = send_frame_to_filters(ist, decoded_frame);
2338 
2340  av_frame_unref(decoded_frame);
2341  return err < 0 ? err : ret;
2342 }
2343 
/* Decode one video packet, derive a best-effort timestamp, retrieve data
 * from hwaccel frames when needed, and forward the frame to the filters.
 * duration_pts receives the decoded frame's pkt_duration for the caller.
 * NOTE(review): original lines 2381, 2393 and 2472 are missing from this
 * extract (presumably update_benchmark(NULL), the av_log(..., WARNING,
 * opening for the video_delay message, and
 * av_frame_unref(ist->filter_frame) — confirm against upstream). */
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2345  int *decode_failed)
2346 {
2347  AVFrame *decoded_frame;
2348  int i, ret = 0, err = 0;
2349  int64_t best_effort_timestamp;
2350  int64_t dts = AV_NOPTS_VALUE;
2351  AVPacket avpkt;
2352 
2353  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2355  // skip the packet.
2356  if (!eof && pkt && pkt->size == 0)
2357  return 0;
2358 
2359  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360  return AVERROR(ENOMEM);
2361  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362  return AVERROR(ENOMEM);
2363  decoded_frame = ist->decoded_frame;
2364  if (ist->dts != AV_NOPTS_VALUE)
2365  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2366  if (pkt) {
2367  avpkt = *pkt;
2368  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2369  }
2370 
2371  // The old code used to set dts on the drain packet, which does not work
2372  // with the new API anymore.
2373  if (eof) {
2374  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2375  if (!new)
2376  return AVERROR(ENOMEM);
2377  ist->dts_buffer = new;
2378  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2379  }
2380 
2382  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2384  if (ret < 0)
2385  *decode_failed = 1;
2386 
2387  // The following line may be required in some cases where there is no parser
2388  // or the parser does not has_b_frames correctly
2389  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2392  } else
2394  "video_delay is larger in decoder than demuxer %d > %d.\n"
2395  "If you want to help, upload a sample "
2396  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398  ist->dec_ctx->has_b_frames,
2399  ist->st->codecpar->video_delay);
2400  }
2401 
2402  if (ret != AVERROR_EOF)
2403  check_decode_result(ist, got_output, ret);
2404 
2405  if (*got_output && ret >= 0) {
2406  if (ist->dec_ctx->width != decoded_frame->width ||
2407  ist->dec_ctx->height != decoded_frame->height ||
2408  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410  decoded_frame->width,
2411  decoded_frame->height,
2412  decoded_frame->format,
2413  ist->dec_ctx->width,
2414  ist->dec_ctx->height,
2415  ist->dec_ctx->pix_fmt);
2416  }
2417  }
2418 
2419  if (!*got_output || ret < 0)
2420  return ret;
2421 
2422  if(ist->top_field_first>=0)
2423  decoded_frame->top_field_first = ist->top_field_first;
2424 
2425  ist->frames_decoded++;
2426 
2427  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2429  if (err < 0)
2430  goto fail;
2431  }
2432  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2433 
2434  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435  *duration_pts = decoded_frame->pkt_duration;
2436 
2437  if (ist->framerate.num)
2438  best_effort_timestamp = ist->cfr_next_pts++;
2439 
     /* On EOF with no timestamp, fall back to the oldest queued packet dts. */
2440  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441  best_effort_timestamp = ist->dts_buffer[0];
2442 
2443  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445  ist->nb_dts_buffer--;
2446  }
2447 
2448  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2450 
2451  if (ts != AV_NOPTS_VALUE)
2452  ist->next_pts = ist->pts = ts;
2453  }
2454 
2455  if (debug_ts) {
2456  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458  ist->st->index, av_ts2str(decoded_frame->pts),
2459  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460  best_effort_timestamp,
2461  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462  decoded_frame->key_frame, decoded_frame->pict_type,
2463  ist->st->time_base.num, ist->st->time_base.den);
2464  }
2465 
2466  if (ist->st->sample_aspect_ratio.num)
2467  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2468 
2469  err = send_frame_to_filters(ist, decoded_frame);
2470 
2471 fail:
2473  av_frame_unref(decoded_frame);
2474  return err < 0 ? err : ret;
2475 }
2476 
/* Decode one subtitle packet and either render it for sub2video or queue it,
 * then encode it to all matching subtitle output streams. With
 * -fix_sub_duration, the previous subtitle's display time is clipped against
 * the new one's pts. NOTE(review): original lines 2502, 2504 (inside the
 * duration-reduced log/assignment) and 2525 (the sub_queue realloc whose
 * result is checked below) are missing from this extract. */
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2478  int *decode_failed)
2479 {
2480  AVSubtitle subtitle;
2481  int free_sub = 1;
2482  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483  &subtitle, got_output, pkt);
2484 
2485  check_decode_result(NULL, got_output, ret);
2486 
2487  if (ret < 0 || !*got_output) {
2488  *decode_failed = 1;
2489  if (!pkt->size)
2490  sub2video_flush(ist);
2491  return ret;
2492  }
2493 
2494  if (ist->fix_sub_duration) {
2495  int end = 1;
2496  if (ist->prev_sub.got_output) {
2497  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498  1000, AV_TIME_BASE);
2499  if (end < ist->prev_sub.subtitle.end_display_time) {
2500  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2503  end <= 0 ? ", dropping it" : "");
2505  }
2506  }
2507  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508  FFSWAP(int, ret, ist->prev_sub.ret);
2509  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2510  if (end <= 0)
2511  goto out;
2512  }
2513 
2514  if (!*got_output)
2515  return ret;
2516 
2517  if (ist->sub2video.frame) {
2518  sub2video_update(ist, &subtitle);
2519  } else if (ist->nb_filters) {
2520  if (!ist->sub2video.sub_queue)
2521  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522  if (!ist->sub2video.sub_queue)
2523  exit_program(1);
2524  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2526  if (ret < 0)
2527  exit_program(1);
2528  }
2529  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2530  free_sub = 0;
2531  }
2532 
2533  if (!subtitle.num_rects)
2534  goto out;
2535 
2536  ist->frames_decoded++;
2537 
2538  for (i = 0; i < nb_output_streams; i++) {
2539  OutputStream *ost = output_streams[i];
2540 
2541  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2543  continue;
2544 
2545  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2546  }
2547 
2548 out:
2549  if (free_sub)
2550  avsubtitle_free(&subtitle);
2551  return ret;
2552 }
2553 
/* Propagate EOF (at the stream's current pts, rescaled to stream time base)
 * to every filtergraph input fed by this input stream.
 * NOTE(review): the signature line (original 2554,
 * send_filter_eof(InputStream *ist)) and the rounding-flags argument line
 * (original 2559) are missing from this extract. */
2555 {
2556  int i, ret;
2557  /* TODO keep pts also in stream time base to avoid converting back */
2558  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2560 
2561  for (i = 0; i < ist->nb_filters; i++) {
2562  ret = ifilter_send_eof(ist->filters[i], pts);
2563  if (ret < 0)
2564  return ret;
2565  }
2566  return 0;
2567 }
2568 
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet driver for one input stream: decodes (looping to drain),
 * maintains pts/dts bookkeeping for both decode and stream-copy paths,
 * sends filter EOF after a flush, and hands the packet to do_streamcopy
 * for non-encoded outputs. Returns 0 once EOF was fully reached, 1 otherwise.
 * NOTE(review): original lines 2629, 2632 and 2733 are missing from this
 * extract (the ticks computation and framerate.num denominators of the two
 * duration formulas). */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2571 {
2572  int ret = 0, i;
2573  int repeating = 0;
2574  int eof_reached = 0;
2575 
2576  AVPacket avpkt;
2577  if (!ist->saw_first_ts) {
2578  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579  ist->pts = 0;
2580  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583  }
2584  ist->saw_first_ts = 1;
2585  }
2586 
2587  if (ist->next_dts == AV_NOPTS_VALUE)
2588  ist->next_dts = ist->dts;
2589  if (ist->next_pts == AV_NOPTS_VALUE)
2590  ist->next_pts = ist->pts;
2591 
2592  if (!pkt) {
2593  /* EOF handling */
2594  av_init_packet(&avpkt);
2595  avpkt.data = NULL;
2596  avpkt.size = 0;
2597  } else {
2598  avpkt = *pkt;
2599  }
2600 
2601  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604  ist->next_pts = ist->pts = ist->dts;
2605  }
2606 
2607  // while we have more to decode or while the decoder did output something on EOF
2608  while (ist->decoding_needed) {
2609  int64_t duration_dts = 0;
2610  int64_t duration_pts = 0;
2611  int got_output = 0;
2612  int decode_failed = 0;
2613 
2614  ist->pts = ist->next_pts;
2615  ist->dts = ist->next_dts;
2616 
2617  switch (ist->dec_ctx->codec_type) {
2618  case AVMEDIA_TYPE_AUDIO:
2619  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2620  &decode_failed);
2621  break;
2622  case AVMEDIA_TYPE_VIDEO:
2623  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2624  &decode_failed);
2625  if (!repeating || !pkt || got_output) {
2626  if (pkt && pkt->duration) {
2627  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2630  duration_dts = ((int64_t)AV_TIME_BASE *
2631  ist->dec_ctx->framerate.den * ticks) /
2633  }
2634 
2635  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636  ist->next_dts += duration_dts;
2637  }else
2638  ist->next_dts = AV_NOPTS_VALUE;
2639  }
2640 
2641  if (got_output) {
2642  if (duration_pts > 0) {
2643  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2644  } else {
2645  ist->next_pts += duration_dts;
2646  }
2647  }
2648  break;
2649  case AVMEDIA_TYPE_SUBTITLE:
2650  if (repeating)
2651  break;
2652  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653  if (!pkt && ret >= 0)
2654  ret = AVERROR_EOF;
2655  break;
2656  default:
2657  return -1;
2658  }
2659 
2660  if (ret == AVERROR_EOF) {
2661  eof_reached = 1;
2662  break;
2663  }
2664 
2665  if (ret < 0) {
2666  if (decode_failed) {
2667  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668  ist->file_index, ist->st->index, av_err2str(ret));
2669  } else {
2670  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672  }
2673  if (!decode_failed || exit_on_error)
2674  exit_program(1);
2675  break;
2676  }
2677 
2678  if (got_output)
2679  ist->got_output = 1;
2680 
2681  if (!got_output)
2682  break;
2683 
2684  // During draining, we might get multiple output frames in this loop.
2685  // ffmpeg.c does not drain the filter chain on configuration changes,
2686  // which means if we send multiple frames at once to the filters, and
2687  // one of those frames changes configuration, the buffered frames will
2688  // be lost. This can upset certain FATE tests.
2689  // Decode only 1 frame per call on EOF to appease these FATE tests.
2690  // The ideal solution would be to rewrite decoding to use the new
2691  // decoding API in a better way.
2692  if (!pkt)
2693  break;
2694 
2695  repeating = 1;
2696  }
2697 
2698  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699  /* except when looping we need to flush but not to send an EOF */
2700  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701  int ret = send_filter_eof(ist);
2702  if (ret < 0) {
2703  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2704  exit_program(1);
2705  }
2706  }
2707 
2708  /* handle stream copy */
2709  if (!ist->decoding_needed && pkt) {
2710  ist->dts = ist->next_dts;
2711  switch (ist->dec_ctx->codec_type) {
2712  case AVMEDIA_TYPE_AUDIO:
2713  av_assert1(pkt->duration >= 0);
2714  if (ist->dec_ctx->sample_rate) {
2715  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716  ist->dec_ctx->sample_rate;
2717  } else {
2718  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2719  }
2720  break;
2721  case AVMEDIA_TYPE_VIDEO:
2722  if (ist->framerate.num) {
2723  // TODO: Remove work-around for c99-to-c89 issue 7
2724  AVRational time_base_q = AV_TIME_BASE_Q;
2725  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727  } else if (pkt->duration) {
2728  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729  } else if(ist->dec_ctx->framerate.num != 0) {
2730  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731  ist->next_dts += ((int64_t)AV_TIME_BASE *
2732  ist->dec_ctx->framerate.den * ticks) /
2734  }
2735  break;
2736  }
2737  ist->pts = ist->dts;
2738  ist->next_pts = ist->next_dts;
2739  }
2740  for (i = 0; i < nb_output_streams; i++) {
2741  OutputStream *ost = output_streams[i];
2742 
2743  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2744  continue;
2745 
2746  do_streamcopy(ist, ost, pkt);
2747  }
2748 
2749  return !eof_reached;
2750 }
2751 
/* Generate an SDP description for all rtp-muxed output files and print it to
 * stdout or write it to -sdp_file. Returns early until every output file's
 * header has been written. NOTE(review): original line 2789 (between the
 * avio_closep and the closing brace) is missing from this extract. */
2752 static void print_sdp(void)
2753 {
2754  char sdp[16384];
2755  int i;
2756  int j;
2757  AVIOContext *sdp_pb;
2758  AVFormatContext **avc;
2759 
2760  for (i = 0; i < nb_output_files; i++) {
2761  if (!output_files[i]->header_written)
2762  return;
2763  }
2764 
2765  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2766  if (!avc)
2767  exit_program(1);
2768  for (i = 0, j = 0; i < nb_output_files; i++) {
2769  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770  avc[j] = output_files[i]->ctx;
2771  j++;
2772  }
2773  }
2774 
2775  if (!j)
2776  goto fail;
2777 
2778  av_sdp_create(avc, j, sdp, sizeof(sdp));
2779 
2780  if (!sdp_filename) {
2781  printf("SDP:\n%s\n", sdp);
2782  fflush(stdout);
2783  } else {
2784  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2786  } else {
2787  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788  avio_closep(&sdp_pb);
2790  }
2791  }
2792 
2793 fail:
2794  av_freep(&avc);
2795 }
2796 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * format list and pick the first one for which the requested hwaccel can be
 * initialized, falling back to the first non-hwaccel format otherwise.
 * NOTE(review): this extract is missing the signature line (original 2797)
 * and several interior lines (2804 pixdesc lookup, 2818 hw-config method
 * flag, 2833/2836 and 2862 av_log fragments, 2871 hw_frames_ctx ref). */
2798 {
2799  InputStream *ist = s->opaque;
2800  const enum AVPixelFormat *p;
2801  int ret;
2802 
2803  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2805  const AVCodecHWConfig *config = NULL;
2806  int i;
2807 
     /* Software formats come after hwaccel ones: stop at the first one. */
2808  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2809  break;
2810 
2811  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2812  ist->hwaccel_id == HWACCEL_AUTO) {
2813  for (i = 0;; i++) {
2814  config = avcodec_get_hw_config(s->codec, i);
2815  if (!config)
2816  break;
2817  if (!(config->methods &
2819  continue;
2820  if (config->pix_fmt == *p)
2821  break;
2822  }
2823  }
2824  if (config) {
2825  if (config->device_type != ist->hwaccel_device_type) {
2826  // Different hwaccel offered, ignore.
2827  continue;
2828  }
2829 
2830  ret = hwaccel_decode_init(s);
2831  if (ret < 0) {
2832  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2834  "%s hwaccel requested for input stream #%d:%d, "
2835  "but cannot be initialized.\n",
2837  ist->file_index, ist->st->index);
2838  return AV_PIX_FMT_NONE;
2839  }
2840  continue;
2841  }
2842  } else {
2843  const HWAccel *hwaccel = NULL;
2844  int i;
2845  for (i = 0; hwaccels[i].name; i++) {
2846  if (hwaccels[i].pix_fmt == *p) {
2847  hwaccel = &hwaccels[i];
2848  break;
2849  }
2850  }
2851  if (!hwaccel) {
2852  // No hwaccel supporting this pixfmt.
2853  continue;
2854  }
2855  if (hwaccel->id != ist->hwaccel_id) {
2856  // Does not match requested hwaccel.
2857  continue;
2858  }
2859 
2860  ret = hwaccel->init(s);
2861  if (ret < 0) {
2863  "%s hwaccel requested for input stream #%d:%d, "
2864  "but cannot be initialized.\n", hwaccel->name,
2865  ist->file_index, ist->st->index);
2866  return AV_PIX_FMT_NONE;
2867  }
2868  }
2869 
2870  if (ist->hw_frames_ctx) {
2872  if (!s->hw_frames_ctx)
2873  return AV_PIX_FMT_NONE;
2874  }
2875 
2876  ist->hwaccel_pix_fmt = *p;
2877  break;
2878  }
2879 
2880  return *p;
2881 }
2882 
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hwaccel frames, otherwise use the libavcodec default.
 * NOTE(review): the signature line (original 2883) is missing from this
 * extract. */
2884 {
2885  InputStream *ist = s->opaque;
2886 
2887  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2888  return ist->hwaccel_get_buffer(s, frame, flags);
2889 
2890  return avcodec_default_get_buffer2(s, frame, flags);
2891 }
2892 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer2 callbacks, set decoder options, set up any
 * hardware device, and call avcodec_open2. On failure an error message is
 * written into `error` and a negative code returned.
 * NOTE(review): original lines 2915, 2928 and 2949 are missing from this
 * extract (fragments of the DVB-subtitle branch, the attached-pic threads
 * condition, and the end of the decoding_needed branch). */
2893 static int init_input_stream(int ist_index, char *error, int error_len)
2894 {
2895  int ret;
2896  InputStream *ist = input_streams[ist_index];
2897 
2898  if (ist->decoding_needed) {
2899  AVCodec *codec = ist->dec;
2900  if (!codec) {
2901  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2902  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2903  return AVERROR(EINVAL);
2904  }
2905 
2906  ist->dec_ctx->opaque = ist;
2907  ist->dec_ctx->get_format = get_format;
2908  ist->dec_ctx->get_buffer2 = get_buffer;
2909  ist->dec_ctx->thread_safe_callbacks = 1;
2910 
2911  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2912  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2913  (ist->decoding_needed & DECODING_FOR_OST)) {
2914  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2916  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2917  }
2918 
2919  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2920 
2921  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2922  * audio, and video decoders such as cuvid or mediacodec */
2923  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2924 
2925  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2926  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2927  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2929  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2930 
2931  ret = hw_device_setup_for_decode(ist);
2932  if (ret < 0) {
2933  snprintf(error, error_len, "Device setup failed for "
2934  "decoder on input stream #%d:%d : %s",
2935  ist->file_index, ist->st->index, av_err2str(ret));
2936  return ret;
2937  }
2938 
2939  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2940  if (ret == AVERROR_EXPERIMENTAL)
2941  abort_codec_experimental(codec, 0);
2942 
2943  snprintf(error, error_len,
2944  "Error while opening decoder for input stream "
2945  "#%d:%d : %s",
2946  ist->file_index, ist->st->index, av_err2str(ret));
2947  return ret;
2948  }
2950  }
2951 
2952  ist->next_pts = AV_NOPTS_VALUE;
2953  ist->next_dts = AV_NOPTS_VALUE;
2954 
2955  return 0;
2956 }
2957 
/* Return the InputStream an output stream copies/encodes from, or NULL when
 * the output has no source stream (e.g. it is fed by a filtergraph).
 * NOTE(review): the signature line (original 2958,
 * get_input_stream(OutputStream *ost)) is missing from this extract. */
2959 {
2960  if (ost->source_index >= 0)
2961  return input_streams[ost->source_index];
2962  return NULL;
2963 }
2964 
/* qsort()-style comparator for int64_t values: returns a negative, zero or
 * positive value when *a is less than, equal to, or greater than *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2969 
2970 /* open the muxer when all the streams are initialized */
/* Once every stream of the output file is initialized: write the header,
 * print/emit the SDP if requested, and flush packets buffered in the
 * per-stream muxing queues. Returns 0 (also when still waiting on streams).
 * NOTE(review): the av_log(..., opening before the "Could not write header"
 * message (original line 2985) is missing from this extract. */
2971 static int check_init_output_file(OutputFile *of, int file_index)
2972 {
2973  int ret, i;
2974 
2975  for (i = 0; i < of->ctx->nb_streams; i++) {
2976  OutputStream *ost = output_streams[of->ost_index + i];
2977  if (!ost->initialized)
2978  return 0;
2979  }
2980 
2981  of->ctx->interrupt_callback = int_cb;
2982 
2983  ret = avformat_write_header(of->ctx, &of->opts);
2984  if (ret < 0) {
2986  "Could not write header for output file #%d "
2987  "(incorrect codec parameters ?): %s\n",
2988  file_index, av_err2str(ret));
2989  return ret;
2990  }
2991  //assert_avoptions(of->opts);
2992  of->header_written = 1;
2993 
2994  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2995 
2996  if (sdp_filename || want_sdp)
2997  print_sdp();
2998 
2999  /* flush the muxing queues */
3000  for (i = 0; i < of->ctx->nb_streams; i++) {
3001  OutputStream *ost = output_streams[of->ost_index + i];
3002 
3003  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3004  if (!av_fifo_size(ost->muxing_queue))
3005  ost->mux_timebase = ost->st->time_base;
3006 
3007  while (av_fifo_size(ost->muxing_queue)) {
3008  AVPacket pkt;
3009  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3010  write_packet(of, &pkt, ost, 1);
3011  }
3012  }
3013 
3014  return 0;
3015 }
3016 
3018 {
3019  AVBSFContext *ctx;
3020  int i, ret;
3021 
3022  if (!ost->nb_bitstream_filters)
3023  return 0;
3024 
3025  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3026  ctx = ost->bsf_ctx[i];
3027 
3028  ret = avcodec_parameters_copy(ctx->par_in,
3029  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3030  if (ret < 0)
3031  return ret;
3032 
3033  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3034 
3035  ret = av_bsf_init(ctx);
3036  if (ret < 0) {
3037  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3038  ost->bsf_ctx[i]->filter->name);
3039  return ret;
3040  }
3041  }
3042 
3043  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3044  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3045  if (ret < 0)
3046  return ret;
3047 
3048  ost->st->time_base = ctx->time_base_out;
3049 
3050  return 0;
3051 }
3052 
/* Set up an output stream in stream-copy mode: copy codec parameters from
 * the input stream (with -c:*-specific option overrides applied through a
 * temporary codec context), choose a codec tag valid for the output format,
 * and copy frame rate, duration, disposition, side data and aspect ratio.
 * NOTE(review): this extract is missing the signature line (original 3053)
 * and several interior lines (3069/3076 av_log openings, 3099 the timing
 * transfer call, 3105 time_base assignment, 3127/3130 the rotation side-data
 * handling, 3147 the av_mul_q opening of the sar computation). */
3054 {
3055  OutputFile *of = output_files[ost->file_index];
3056  InputStream *ist = get_input_stream(ost);
3057  AVCodecParameters *par_dst = ost->st->codecpar;
3058  AVCodecParameters *par_src = ost->ref_par;
3059  AVRational sar;
3060  int i, ret;
3061  uint32_t codec_tag = par_dst->codec_tag;
3062 
3063  av_assert0(ist && !ost->filter);
3064 
3065  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3066  if (ret >= 0)
3067  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3068  if (ret < 0) {
3070  "Error setting up codec context options.\n");
3071  return ret;
3072  }
3073 
3074  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3075  if (ret < 0) {
3077  "Error getting reference codec parameters.\n");
3078  return ret;
3079  }
3080 
3081  if (!codec_tag) {
3082  unsigned int codec_tag_tmp;
     /* Keep the source tag when the muxer has no tag table, the tag maps
      * back to the same codec id, or no valid tag exists for the codec. */
3083  if (!of->ctx->oformat->codec_tag ||
3084  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3085  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3086  codec_tag = par_src->codec_tag;
3087  }
3088 
3089  ret = avcodec_parameters_copy(par_dst, par_src);
3090  if (ret < 0)
3091  return ret;
3092 
3093  par_dst->codec_tag = codec_tag;
3094 
3095  if (!ost->frame_rate.num)
3096  ost->frame_rate = ist->framerate;
3097  ost->st->avg_frame_rate = ost->frame_rate;
3098 
3100  if (ret < 0)
3101  return ret;
3102 
3103  // copy timebase while removing common factors
3104  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3106 
3107  // copy estimated duration as a hint to the muxer
3108  if (ost->st->duration <= 0 && ist->st->duration > 0)
3109  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3110 
3111  // copy disposition
3112  ost->st->disposition = ist->st->disposition;
3113 
3114  if (ist->st->nb_side_data) {
3115  for (i = 0; i < ist->st->nb_side_data; i++) {
3116  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3117  uint8_t *dst_data;
3118 
3119  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3120  if (!dst_data)
3121  return AVERROR(ENOMEM);
3122  memcpy(dst_data, sd_src->data, sd_src->size);
3123  }
3124  }
3125 
3126  if (ost->rotate_overridden) {
3128  sizeof(int32_t) * 9);
3129  if (sd)
3131  }
3132 
3133  switch (par_dst->codec_type) {
3134  case AVMEDIA_TYPE_AUDIO:
3135  if (audio_volume != 256) {
3136  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3137  exit_program(1);
3138  }
3139  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3140  par_dst->block_align= 0;
3141  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3142  par_dst->block_align= 0;
3143  break;
3144  case AVMEDIA_TYPE_VIDEO:
3145  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3146  sar =
3148  (AVRational){ par_dst->height, par_dst->width });
3149  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3150  "with stream copy may produce invalid files\n");
3151  }
3152  else if (ist->st->sample_aspect_ratio.num)
3153  sar = ist->st->sample_aspect_ratio;
3154  else
3155  sar = par_src->sample_aspect_ratio;
3156  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3157  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3158  ost->st->r_frame_rate = ist->st->r_frame_rate;
3159  break;
3160  }
3161 
3162  ost->mux_timebase = ist->st->time_base;
3163 
3164  return 0;
3165 }
3166 
3168 {
3169  AVDictionaryEntry *e;
3170 
3171  uint8_t *encoder_string;
3172  int encoder_string_len;
3173  int format_flags = 0;
3174  int codec_flags = ost->enc_ctx->flags;
3175 
3176  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3177  return;
3178 
3179  e = av_dict_get(of->opts, "fflags", NULL, 0);
3180  if (e) {
3181  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3182  if (!o)
3183  return;
3184  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3185  }
3186  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3187  if (e) {
3188  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3189  if (!o)
3190  return;
3191  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3192  }
3193 
3194  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3195  encoder_string = av_mallocz(encoder_string_len);
3196  if (!encoder_string)
3197  exit_program(1);
3198 
3199  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3200  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3201  else
3202  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3203  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3204  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3206 }
3207 
/* Parse the -force_key_frames argument: a comma-separated list of timestamps
 * and/or "chapters[+delta]" entries, producing a sorted array of PTS values
 * (in avctx->time_base) stored in ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): doc-extraction dropped orig line 3240 (the
 * `av_log(NULL, AV_LOG_FATAL,` opening of the realloc-failure message). */
3208 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3209  AVCodecContext *avctx)
3210 {
3211  char *p;
3212  int n = 1, i, size, index = 0;
3213  int64_t t, *pts;
3214 
/* Count entries: one more than the number of commas. */
3215  for (p = kf; *p; p++)
3216  if (*p == ',')
3217  n++;
3218  size = n;
3219  pts = av_malloc_array(size, sizeof(*pts));
3220  if (!pts) {
3221  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3222  exit_program(1);
3223  }
3224 
/* Tokenize in place: each ',' is overwritten with NUL. */
3225  p = kf;
3226  for (i = 0; i < n; i++) {
3227  char *next = strchr(p, ',');
3228 
3229  if (next)
3230  *next++ = 0;
3231 
3232  if (!memcmp(p, "chapters", 8)) {
3233 
/* "chapters[+delta]": one keyframe per chapter start, grown in place. */
3234  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3235  int j;
3236 
3237  if (avf->nb_chapters > INT_MAX - size ||
3238  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3239  sizeof(*pts)))) {
3241  "Could not allocate forced key frames array.\n");
3242  exit_program(1);
3243  }
3244  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3245  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3246 
3247  for (j = 0; j < avf->nb_chapters; j++) {
3248  AVChapter *c = avf->chapters[j];
3249  av_assert1(index < size);
3250  pts[index++] = av_rescale_q(c->start, c->time_base,
3251  avctx->time_base) + t;
3252  }
3253 
3254  } else {
3255 
/* Plain timestamp entry, converted to the encoder time base. */
3256  t = parse_time_or_die("force_key_frames", p, 1);
3257  av_assert1(index < size);
3258  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3259 
3260  }
3261 
3262  p = next;
3263  }
3264 
3265  av_assert0(index == size);
3266  qsort(pts, size, sizeof(*pts), compare_int64);
3267  ost->forced_kf_count = size;
3268  ost->forced_kf_pts = pts;
3269 }
3270 
3271 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3272 {
3273  InputStream *ist = get_input_stream(ost);
3274  AVCodecContext *enc_ctx = ost->enc_ctx;
3275  AVFormatContext *oc;
3276 
3277  if (ost->enc_timebase.num > 0) {
3278  enc_ctx->time_base = ost->enc_timebase;
3279  return;
3280  }
3281 
3282  if (ost->enc_timebase.num < 0) {
3283  if (ist) {
3284  enc_ctx->time_base = ist->st->time_base;
3285  return;
3286  }
3287 
3288  oc = output_files[ost->file_index]->ctx;
3289  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3290  }
3291 
3292  enc_ctx->time_base = default_time_base;
3293 }
3294 
/* Configure an output stream's encoder context before avcodec_open2():
 * disposition/rotation metadata handling, frame-rate selection for video,
 * per-media-type time base, dimensions/pixel format from the filter sink,
 * and forced-keyframe parsing.
 * NOTE(review): doc-extraction dropped the signature (orig line 3295,
 * `static int init_output_stream_encode(OutputStream *ost)`) and several
 * interior lines (orig 3299, 3324-3325, 3330, 3337, 3358, 3362-3364, 3370,
 * 3373, 3375, 3381, 3389, 3404, 3415-3418, 3422-3425, 3430) — restore from
 * the pristine source before compiling. */
3296 {
3297  InputStream *ist = get_input_stream(ost);
3298  AVCodecContext *enc_ctx = ost->enc_ctx;
3300  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3301  int j, ret;
3302 
3303  set_encoder_id(output_files[ost->file_index], ost);
3304 
3305  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3306  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3307  // which have to be filtered out to prevent leaking them to output files.
3308  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3309 
3310  if (ist) {
3311  ost->st->disposition = ist->st->disposition;
3312 
3313  dec_ctx = ist->dec_ctx;
3314 
3315  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3316  } else {
/* No input stream: look for a sibling stream of the same type in the muxer. */
3317  for (j = 0; j < oc->nb_streams; j++) {
3318  AVStream *st = oc->streams[j];
3319  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3320  break;
3321  }
3322  if (j == oc->nb_streams)
3323  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3326  }
3327 
3328  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Frame-rate fallback chain: -r option, input -framerate, input r_frame_rate,
 * then a hard default of 25 fps with a warning. */
3329  if (!ost->frame_rate.num)
3331  if (ist && !ost->frame_rate.num)
3332  ost->frame_rate = ist->framerate;
3333  if (ist && !ost->frame_rate.num)
3334  ost->frame_rate = ist->st->r_frame_rate;
3335  if (ist && !ost->frame_rate.num) {
3336  ost->frame_rate = (AVRational){25, 1};
3338  "No information "
3339  "about the input framerate is available. Falling "
3340  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3341  "if you want a different framerate.\n",
3342  ost->file_index, ost->index);
3343  }
3344 
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3345  if (ost->enc->supported_framerates && !ost->force_fps) {
3346  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3347  ost->frame_rate = ost->enc->supported_framerates[idx];
3348  }
3349  // reduce frame rate for mpeg4 to be within the spec limits
3350  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3351  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3352  ost->frame_rate.num, ost->frame_rate.den, 65535);
3353  }
3354  }
3355 
3356  switch (enc_ctx->codec_type) {
3357  case AVMEDIA_TYPE_AUDIO:
3359  if (dec_ctx)
3360  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3361  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3365 
3366  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3367  break;
3368 
3369  case AVMEDIA_TYPE_VIDEO:
3371 
3372  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3374  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3376  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3377  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3378  }
/* Forced keyframe PTS were parsed in AV_TIME_BASE_Q; rescale them here. */
3379  for (j = 0; j < ost->forced_kf_count; j++)
3380  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3382  enc_ctx->time_base);
3383 
/* Geometry and SAR come from the buffersink feeding this encoder. */
3384  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3385  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3386  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3387  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3388  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3390 
3391  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3392  if (dec_ctx)
3393  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3394  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3395 
3396  enc_ctx->framerate = ost->frame_rate;
3397 
3398  ost->st->avg_frame_rate = ost->frame_rate;
3399 
3400  if (!dec_ctx ||
3401  enc_ctx->width != dec_ctx->width ||
3402  enc_ctx->height != dec_ctx->height ||
3403  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3405  }
3406 
/* -top 0/1 forces bottom/top field first; any other value leaves it alone. */
3407  if (ost->top_field_first == 0) {
3408  enc_ctx->field_order = AV_FIELD_BB;
3409  } else if (ost->top_field_first == 1) {
3410  enc_ctx->field_order = AV_FIELD_TT;
3411  }
3412 
3413  if (ost->forced_keyframes) {
3414  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3417  if (ret < 0) {
3419  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3420  return ret;
3421  }
3426 
3427  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3428  // parse it only for static kf timings
3429  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3431  }
3432  }
3433  break;
3434  case AVMEDIA_TYPE_SUBTITLE:
3435  enc_ctx->time_base = AV_TIME_BASE_Q;
3436  if (!enc_ctx->width) {
3437  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3438  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3439  }
3440  break;
3441  case AVMEDIA_TYPE_DATA:
3442  break;
3443  default:
3444  abort();
3445  break;
3446  }
3447 
3448  ost->mux_timebase = enc_ctx->time_base;
3449 
3450  return 0;
3451 }
3452 
/* Fully initialize one output stream: run the encode or streamcopy setup,
 * open the encoder, copy side data and timing hints to the muxer stream,
 * apply user -disposition flags, set up bitstream filters, and finally try
 * to write the output file header if all its streams are now ready.
 * Returns 0 on success, a negative AVERROR otherwise (with a message in
 * `error` for encoder-open / device-setup failures).
 * NOTE(review): doc-extraction dropped several interior lines (orig 3470,
 * 3474, 3485-3487, 3502, 3504, 3528, 3530, 3536, 3538) — restore from the
 * pristine source before compiling. */
3453 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3454 {
3455  int ret = 0;
3456 
3457  if (ost->encoding_needed) {
3458  AVCodec *codec = ost->enc;
3459  AVCodecContext *dec = NULL;
3460  InputStream *ist;
3461 
3462  ret = init_output_stream_encode(ost);
3463  if (ret < 0)
3464  return ret;
3465 
3466  if ((ist = get_input_stream(ost)))
3467  dec = ist->dec_ctx;
3468  if (dec && dec->subtitle_header) {
3469  /* ASS code assumes this buffer is null terminated so add extra byte. */
3471  if (!ost->enc_ctx->subtitle_header)
3472  return AVERROR(ENOMEM);
3473  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3475  }
3476  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3477  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio encoders with no default bitrate get 128 kb/s unless -b/-ab given. */
3478  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3479  !codec->defaults &&
3480  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3481  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3482  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3483 
/* Hardware frames context from the filter sink, or plain device setup. */
3484  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3488  if (!ost->enc_ctx->hw_frames_ctx)
3489  return AVERROR(ENOMEM);
3490  } else {
3491  ret = hw_device_setup_for_encode(ost);
3492  if (ret < 0) {
3493  snprintf(error, error_len, "Device setup failed for "
3494  "encoder on output stream #%d:%d : %s",
3495  ost->file_index, ost->index, av_err2str(ret));
3496  return ret;
3497  }
3498  }
/* Reject text<->bitmap subtitle transcoding, which is unsupported. */
3499  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3500  int input_props = 0, output_props = 0;
3501  AVCodecDescriptor const *input_descriptor =
3503  AVCodecDescriptor const *output_descriptor =
3505  if (input_descriptor)
3506  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3507  if (output_descriptor)
3508  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3509  if (input_props && output_props && input_props != output_props) {
3510  snprintf(error, error_len,
3511  "Subtitle encoding currently only possible from text to text "
3512  "or bitmap to bitmap");
3513  return AVERROR_INVALIDDATA;
3514  }
3515  }
3516 
3517  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3518  if (ret == AVERROR_EXPERIMENTAL)
3519  abort_codec_experimental(codec, 1);
3520  snprintf(error, error_len,
3521  "Error while opening encoder for output stream #%d:%d - "
3522  "maybe incorrect parameters such as bit_rate, rate, width or height",
3523  ost->file_index, ost->index);
3524  return ret;
3525  }
3526  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3527  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3529  ost->enc_ctx->frame_size);
/* Heuristic: bitrates under 1000 were probably meant as kbit/s. */
3531  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3532  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3533  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3534  " It takes bits/s as argument, not kbits/s\n");
3535 
3537  if (ret < 0) {
3539  "Error initializing the output stream codec context.\n");
3540  exit_program(1);
3541  }
3542  /*
3543  * FIXME: ost->st->codec should't be needed here anymore.
3544  */
3545  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3546  if (ret < 0)
3547  return ret;
3548 
/* Propagate side data the encoder produced to the muxer stream. */
3549  if (ost->enc_ctx->nb_coded_side_data) {
3550  int i;
3551 
3552  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3553  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3554  uint8_t *dst_data;
3555 
3556  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3557  if (!dst_data)
3558  return AVERROR(ENOMEM);
3559  memcpy(dst_data, sd_src->data, sd_src->size);
3560  }
3561  }
3562 
3563  /*
3564  * Add global input side data. For now this is naive, and copies it
3565  * from the input stream's global side data. All side data should
3566  * really be funneled over AVFrame and libavfilter, then added back to
3567  * packet side data, and then potentially using the first packet for
3568  * global side data.
3569  */
3570  if (ist) {
3571  int i;
3572  for (i = 0; i < ist->st->nb_side_data; i++) {
3573  AVPacketSideData *sd = &ist->st->side_data[i];
3574  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3575  if (!dst)
3576  return AVERROR(ENOMEM);
3577  memcpy(dst, sd->data, sd->size);
3578  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3579  av_display_rotation_set((uint32_t *)dst, 0);
3580  }
3581  }
3582 
3583  // copy timebase while removing common factors
3584  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3585  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3586 
3587  // copy estimated duration as a hint to the muxer
3588  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3589  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3590 
3591  ost->st->codec->codec= ost->enc_ctx->codec;
3592  } else if (ost->stream_copy) {
3593  ret = init_output_stream_streamcopy(ost);
3594  if (ret < 0)
3595  return ret;
3596  }
3597 
3598  // parse user provided disposition, and update stream values
3599  if (ost->disposition) {
3600  static const AVOption opts[] = {
3601  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3602  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3603  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3604  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3605  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3606  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3607  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3608  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3609  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3610  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3611  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3612  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3613  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3614  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3615  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3616  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3617  { NULL },
3618  };
/* Throwaway AVClass so av_opt_eval_flags() can parse the flag string. */
3619  static const AVClass class = {
3620  .class_name = "",
3621  .item_name = av_default_item_name,
3622  .option = opts,
3623  .version = LIBAVUTIL_VERSION_INT,
3624  };
3625  const AVClass *pclass = &class;
3626 
3627  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3628  if (ret < 0)
3629  return ret;
3630  }
3631 
3632  /* initialize bitstream filters for the output stream
3633  * needs to be done here, because the codec id for streamcopy is not
3634  * known until now */
3635  ret = init_output_bsfs(ost);
3636  if (ret < 0)
3637  return ret;
3638 
3639  ost->initialized = 1;
3640 
3641  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3642  if (ret < 0)
3643  return ret;
3644 
3645  return ret;
3646 }
3647 
/* Warn once per stream index when a packet arrives for a stream that
 * appeared after probing; nb_streams_warn tracks the highest index already
 * reported so each new stream is logged only once.
 * NOTE(review): doc-extraction dropped orig line 3657 (the media-type string
 * argument of the av_log call) — restore from the pristine source. */
3648 static void report_new_stream(int input_index, AVPacket *pkt)
3649 {
3650  InputFile *file = input_files[input_index];
3651  AVStream *st = file->ctx->streams[pkt->stream_index];
3652 
3653  if (pkt->stream_index < file->nb_streams_warn)
3654  return;
3655  av_log(file->ctx, AV_LOG_WARNING,
3656  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3658  input_index, pkt->stream_index,
3659  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3660  file->nb_streams_warn = pkt->stream_index + 1;
3661 }
3662 
/* One-time transcoding setup: bind complex-filtergraph outputs to a source
 * stream index, start framerate emulation clocks, initialize all input
 * streams and non-filtered output streams, discard unused programs, write
 * headers for stream-less outputs, then dump the stream mapping. On any
 * failure it falls through to the mapping dump and returns the error.
 * NOTE(review): doc-extraction dropped orig line 3830 (the store that marks
 * transcode init as done) — restore from the pristine source. */
3663 static int transcode_init(void)
3664 {
3665  int ret = 0, i, j, k;
3666  AVFormatContext *oc;
3667  OutputStream *ost;
3668  InputStream *ist;
3669  char error[1024] = {0};
3670 
/* Give each single-input complex-filtergraph output a source_index so the
 * mapping dump below can refer to a concrete input stream. */
3671  for (i = 0; i < nb_filtergraphs; i++) {
3672  FilterGraph *fg = filtergraphs[i];
3673  for (j = 0; j < fg->nb_outputs; j++) {
3674  OutputFilter *ofilter = fg->outputs[j];
3675  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3676  continue;
3677  if (fg->nb_inputs != 1)
3678  continue;
3679  for (k = nb_input_streams-1; k >= 0 ; k--)
3680  if (fg->inputs[0]->ist == input_streams[k])
3681  break;
3682  ofilter->ost->source_index = k;
3683  }
3684  }
3685 
3686  /* init framerate emulation */
3687  for (i = 0; i < nb_input_files; i++) {
3688  InputFile *ifile = input_files[i];
3689  if (ifile->rate_emu)
3690  for (j = 0; j < ifile->nb_streams; j++)
3691  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3692  }
3693 
3694  /* init input streams */
3695  for (i = 0; i < nb_input_streams; i++)
3696  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3697  for (i = 0; i < nb_output_streams; i++) {
3698  ost = output_streams[i];
3699  avcodec_close(ost->enc_ctx);
3700  }
3701  goto dump_format;
3702  }
3703 
3704  /* open each encoder */
3705  for (i = 0; i < nb_output_streams; i++) {
3706  // skip streams fed from filtergraphs until we have a frame for them
3707  if (output_streams[i]->filter)
3708  continue;
3709 
3710  ret = init_output_stream(output_streams[i], error, sizeof(error));
3711  if (ret < 0)
3712  goto dump_format;
3713  }
3714 
3715  /* discard unused programs */
3716  for (i = 0; i < nb_input_files; i++) {
3717  InputFile *ifile = input_files[i];
3718  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3719  AVProgram *p = ifile->ctx->programs[j];
3720  int discard = AVDISCARD_ALL;
3721 
/* Keep the program if any of its streams is actually used. */
3722  for (k = 0; k < p->nb_stream_indexes; k++)
3723  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3724  discard = AVDISCARD_DEFAULT;
3725  break;
3726  }
3727  p->discard = discard;
3728  }
3729  }
3730 
3731  /* write headers for files with no streams */
3732  for (i = 0; i < nb_output_files; i++) {
3733  oc = output_files[i]->ctx;
3734  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3735  ret = check_init_output_file(output_files[i], i);
3736  if (ret < 0)
3737  goto dump_format;
3738  }
3739  }
3740 
3741  dump_format:
3742  /* dump the stream mapping */
3743  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3744  for (i = 0; i < nb_input_streams; i++) {
3745  ist = input_streams[i];
3746 
3747  for (j = 0; j < ist->nb_filters; j++) {
3748  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3749  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3750  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3751  ist->filters[j]->name);
3752  if (nb_filtergraphs > 1)
3753  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3754  av_log(NULL, AV_LOG_INFO, "\n");
3755  }
3756  }
3757  }
3758 
3759  for (i = 0; i < nb_output_streams; i++) {
3760  ost = output_streams[i];
3761 
3762  if (ost->attachment_filename) {
3763  /* an attached file */
3764  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3765  ost->attachment_filename, ost->file_index, ost->index);
3766  continue;
3767  }
3768 
3769  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3770  /* output from a complex graph */
3771  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3772  if (nb_filtergraphs > 1)
3773  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3774 
3775  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3776  ost->index, ost->enc ? ost->enc->name : "?");
3777  continue;
3778  }
3779 
3780  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3781  input_streams[ost->source_index]->file_index,
3782  input_streams[ost->source_index]->st->index,
3783  ost->file_index,
3784  ost->index);
3785  if (ost->sync_ist != input_streams[ost->source_index])
3786  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3787  ost->sync_ist->file_index,
3788  ost->sync_ist->st->index);
3789  if (ost->stream_copy)
3790  av_log(NULL, AV_LOG_INFO, " (copy)");
3791  else {
/* Show codec and implementation names; "native" when they coincide. */
3792  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3793  const AVCodec *out_codec = ost->enc;
3794  const char *decoder_name = "?";
3795  const char *in_codec_name = "?";
3796  const char *encoder_name = "?";
3797  const char *out_codec_name = "?";
3798  const AVCodecDescriptor *desc;
3799 
3800  if (in_codec) {
3801  decoder_name = in_codec->name;
3802  desc = avcodec_descriptor_get(in_codec->id);
3803  if (desc)
3804  in_codec_name = desc->name;
3805  if (!strcmp(decoder_name, in_codec_name))
3806  decoder_name = "native";
3807  }
3808 
3809  if (out_codec) {
3810  encoder_name = out_codec->name;
3811  desc = avcodec_descriptor_get(out_codec->id);
3812  if (desc)
3813  out_codec_name = desc->name;
3814  if (!strcmp(encoder_name, out_codec_name))
3815  encoder_name = "native";
3816  }
3817 
3818  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3819  in_codec_name, decoder_name,
3820  out_codec_name, encoder_name);
3821  }
3822  av_log(NULL, AV_LOG_INFO, "\n");
3823  }
3824 
3825  if (ret) {
3826  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3827  return ret;
3828  }
3829 
3831 
3832  return 0;
3833 }
3834 
3835 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3836 static int need_output(void)
3837 {
3838  int i;
3839 
3840  for (i = 0; i < nb_output_streams; i++) {
3841  OutputStream *ost = output_streams[i];
3842  OutputFile *of = output_files[ost->file_index];
3843  AVFormatContext *os = output_files[ost->file_index]->ctx;
3844 
3845  if (ost->finished ||
3846  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3847  continue;
3848  if (ost->frame_number >= ost->max_frames) {
3849  int j;
3850  for (j = 0; j < of->ctx->nb_streams; j++)
3851  close_output_stream(output_streams[of->ost_index + j]);
3852  continue;
3853  }
3854 
3855  return 1;
3856  }
3857 
3858  return 0;
3859 }
3860 
3861 /**
3862  * Select the output stream to process.
3863  *
3864  * @return selected output stream, or NULL if none available
3865  */
/* NOTE(review): doc-extraction dropped the signature (orig line 3866,
 * `static OutputStream *choose_output(void)`) and the av_log() opening of
 * the cur_dts debug message (orig line 3878) — restore before compiling. */
3867 {
3868  int i;
3869  int64_t opts_min = INT64_MAX;
3870  OutputStream *ost_min = NULL;
3871 
3872  for (i = 0; i < nb_output_streams; i++) {
3873  OutputStream *ost = output_streams[i];
/* Streams with no DTS yet sort first (INT64_MIN); others are compared in
 * AV_TIME_BASE_Q so different stream time bases are commensurable. */
3874  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3875  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3876  AV_TIME_BASE_Q);
3877  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3879  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3880  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3881 
/* Uninitialized streams with pending input are always served first. */
3882  if (!ost->initialized && !ost->inputs_done)
3883  return ost;
3884 
3885  if (!ost->finished && opts < opts_min) {
3886  opts_min = opts;
3887  ost_min = ost->unavailable ? NULL : ost;
3888  }
3889  }
3890  return ost_min;
3891 }
3892 
/* Enable (on != 0) or disable local echo on the controlling terminal.
 * Compiled to a no-op on platforms without <termios.h>. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios t;

    if (tcgetattr(0, &t) != 0)
        return;   /* stdin is not a terminal (or query failed): nothing to do */

    if (on)
        t.c_lflag |= ECHO;
    else
        t.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &t);
#endif
}
3904 
/* Poll the keyboard (at most every 100 ms, never in daemon mode) and act on
 * single-key commands: q quits, +/- change verbosity, s toggles the QP
 * histogram, h cycles packet/hex dumping, c/C send filter commands, d/D set
 * decoder/encoder debug flags, ? prints help. Returns AVERROR_EXIT to stop
 * the transcode loop, 0 otherwise.
 * NOTE(review): doc-extraction dropped orig line 3929 (the statement run
 * after cycling the dump mode) and orig line 3966 (the `av_log(NULL,
 * AV_LOG_ERROR,` opening of the parse-error message) — restore from the
 * pristine source before compiling. */
3905 static int check_keyboard_interaction(int64_t cur_time)
3906 {
3907  int i, ret, key;
3908  static int64_t last_time;
3909  if (received_nb_signals)
3910  return AVERROR_EXIT;
3911  /* read_key() returns 0 on EOF */
3912  if(cur_time - last_time >= 100000 && !run_as_daemon){
3913  key = read_key();
3914  last_time = cur_time;
3915  }else
3916  key = -1;
3917  if (key == 'q')
3918  return AVERROR_EXIT;
3919  if (key == '+') av_log_set_level(av_log_get_level()+10);
3920  if (key == '-') av_log_set_level(av_log_get_level()-10);
3921  if (key == 's') qp_hist ^= 1;
3922  if (key == 'h'){
/* 3-state cycle: off -> pkt dump -> pkt+hex dump -> off. */
3923  if (do_hex_dump){
3924  do_hex_dump = do_pkt_dump = 0;
3925  } else if(do_pkt_dump){
3926  do_hex_dump = 1;
3927  } else
3928  do_pkt_dump = 1;
3930  }
3931  if (key == 'c' || key == 'C'){
/* Interactive filter command: "<target>|all <time>|-1 <command>[ <arg>]".
 * 'c' sends to the first matching filter, 'C' to all. */
3932  char buf[4096], target[64], command[256], arg[256] = {0};
3933  double time;
3934  int k, n = 0;
3935  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3936  i = 0;
3937  set_tty_echo(1);
3938  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3939  if (k > 0)
3940  buf[i++] = k;
3941  buf[i] = 0;
3942  set_tty_echo(0);
3943  fprintf(stderr, "\n");
3944  if (k > 0 &&
3945  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3946  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3947  target, time, command, arg);
3948  for (i = 0; i < nb_filtergraphs; i++) {
3949  FilterGraph *fg = filtergraphs[i];
3950  if (fg->graph) {
3951  if (time < 0) {
3952  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3953  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3954  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3955  } else if (key == 'c') {
3956  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3957  ret = AVERROR_PATCHWELCOME;
3958  } else {
3959  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3960  if (ret < 0)
3961  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3962  }
3963  }
3964  }
3965  } else {
3967  "Parse error, at least 3 arguments were expected, "
3968  "only %d given in string '%s'\n", n, buf);
3969  }
3970  }
3971  if (key == 'd' || key == 'D'){
3972  int debug=0;
3973  if(key == 'D') {
/* 'D' cycles to the next debug bit, skipping the ones that would crash. */
3974  debug = input_streams[0]->st->codec->debug<<1;
3975  if(!debug) debug = 1;
3976  while(debug & (FF_DEBUG_DCT_COEFF
3977 #if FF_API_DEBUG_MV
3978  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3979 #endif
3980  )) //unsupported, would just crash
3981  debug += debug;
3982  }else{
3983  char buf[32];
3984  int k = 0;
3985  i = 0;
3986  set_tty_echo(1);
3987  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3988  if (k > 0)
3989  buf[i++] = k;
3990  buf[i] = 0;
3991  set_tty_echo(0);
3992  fprintf(stderr, "\n");
3993  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3994  fprintf(stderr,"error parsing debug value\n");
3995  }
3996  for(i=0;i<nb_input_streams;i++) {
3997  input_streams[i]->st->codec->debug = debug;
3998  }
3999  for(i=0;i<nb_output_streams;i++) {
4000  OutputStream *ost = output_streams[i];
4001  ost->enc_ctx->debug = debug;
4002  }
4003  if(debug) av_log_set_level(AV_LOG_DEBUG);
4004  fprintf(stderr,"debug=%d\n", debug);
4005  }
4006  if (key == '?'){
4007  fprintf(stderr, "key function\n"
4008  "? show this help\n"
4009  "+ increase verbosity\n"
4010  "- decrease verbosity\n"
4011  "c Send command to first matching filter supporting it\n"
4012  "C Send/Queue command to all matching filters\n"
4013  "D cycle through available debug modes\n"
4014  "h dump packets/hex press to cycle through the 3 states\n"
4015  "q quit\n"
4016  "s Show QP histogram\n"
4017  );
4018  }
4019  return 0;
4020 }
4021 
4022 #if HAVE_THREADS
/* Per-input-file reader thread: pulls packets with av_read_frame() and
 * forwards them to the main thread over f->in_thread_queue. On a blocked
 * non-blocking send it retries once in blocking mode; on read error or a
 * failed send it records the error on the queue's receive side and exits.
 * NOTE(review): doc-extraction dropped orig line 4045 (the `av_log(f->ctx,
 * AV_LOG_WARNING,` opening of the queue-blocking warning) — restore before
 * compiling. */
4023 static void *input_thread(void *arg)
4024 {
4025  InputFile *f = arg;
4026  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4027  int ret = 0;
4028 
4029  while (1) {
4030  AVPacket pkt;
4031  ret = av_read_frame(f->ctx, &pkt);
4032 
4033  if (ret == AVERROR(EAGAIN)) {
4034  av_usleep(10000);
4035  continue;
4036  }
4037  if (ret < 0) {
4038  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4039  break;
4040  }
4041  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once and fall back to blocking. */
4042  if (flags && ret == AVERROR(EAGAIN)) {
4043  flags = 0;
4044  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4046  "Thread message queue blocking; consider raising the "
4047  "thread_queue_size option (current value: %d)\n",
4048  f->thread_queue_size);
4049  }
4050  if (ret < 0) {
4051  if (ret != AVERROR_EOF)
4052  av_log(f->ctx, AV_LOG_ERROR,
4053  "Unable to send packet to main thread: %s\n",
4054  av_err2str(ret));
4055  av_packet_unref(&pkt);
4056  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4057  break;
4058  }
4059  }
4060 
4061  return NULL;
4062 }
4063 
/* Shut down input file i's reader thread: drain and unref any queued
 * packets, join the thread, and free the message queue.
 * NOTE(review): doc-extraction dropped orig line 4071 (the call that signals
 * the sending side to stop before draining) — restore before compiling. */
4064 static void free_input_thread(int i)
4065 {
4066  InputFile *f = input_files[i];
4067  AVPacket pkt;
4068 
4069  if (!f || !f->in_thread_queue)
4070  return;
4072  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4073  av_packet_unref(&pkt);
4074 
4075  pthread_join(f->thread, NULL);
4076  f->joined = 1;
4077  av_thread_message_queue_free(&f->in_thread_queue);
4078 }
4079 
4080 static void free_input_threads(void)
4081 {
4082  int i;
4083 
4084  for (i = 0; i < nb_input_files; i++)
4085  free_input_thread(i);
4086 }
4087 
4088 static int init_input_thread(int i)
4089 {
4090  int ret;
4091  InputFile *f = input_files[i];
4092 
4093  if (nb_input_files == 1)
4094  return 0;
4095 
4096  if (f->ctx->pb ? !f->ctx->pb->seekable :
4097  strcmp(f->ctx->iformat->name, "lavfi"))
4098  f->non_blocking = 1;
4099  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4100  f->thread_queue_size, sizeof(AVPacket));
4101  if (ret < 0)
4102  return ret;
4103 
4104  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4105  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4106  av_thread_message_queue_free(&f->in_thread_queue);
4107  return AVERROR(ret);
4108  }
4109 
4110  return 0;
4111 }
4112 
4113 static int init_input_threads(void)
4114 {
4115  int i, ret;
4116 
4117  for (i = 0; i < nb_input_files; i++) {
4118  ret = init_input_thread(i);
4119  if (ret < 0)
4120  return ret;
4121  }
4122  return 0;
4123 }
4124 
/* Receive one packet for input file f from its reader-thread queue.
 * NOTE(review): doc-extraction dropped orig line 4129 (the flags argument,
 * non-blocking receive when f->non_blocking is set) — restore before
 * compiling. */
4125 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4126 {
4127  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4128  f->non_blocking ?
4130 }
4132 
/* Fetch the next packet for input file f, honoring -re rate emulation: when
 * any stream of the file is ahead of wall-clock time, return EAGAIN instead
 * of reading. With multiple inputs packets come from the reader thread.
 * NOTE(review): doc-extraction dropped the signature (orig line 4133,
 * `static int get_input_packet(InputFile *f, AVPacket *pkt)`) — restore
 * before compiling. */
4134 {
4135  if (f->rate_emu) {
4136  int i;
4137  for (i = 0; i < f->nb_streams; i++) {
4138  InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream's DTS (in microseconds) against elapsed real time. */
4139  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4140  int64_t now = av_gettime_relative() - ist->start;
4141  if (pts > now)
4142  return AVERROR(EAGAIN);
4143  }
4144  }
4145 
4146 #if HAVE_THREADS
4147  if (nb_input_files > 1)
4148  return get_input_packet_mt(f, pkt);
4149 #endif
4150  return av_read_frame(f->ctx, pkt);
4151 }
4152 
4153 static int got_eagain(void)
4154 {
4155  int i;
4156  for (i = 0; i < nb_output_streams; i++)
4157  if (output_streams[i]->unavailable)
4158  return 1;
4159  return 0;
4160 }
4161 
4162 static void reset_eagain(void)
4163 {
4164  int i;
4165  for (i = 0; i < nb_input_files; i++)
4166  input_files[i]->eagain = 0;
4167  for (i = 0; i < nb_output_streams; i++)
4168  output_streams[i]->unavailable = 0;
4169 }
4170 
4171 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4172 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4173  AVRational time_base)
4174 {
4175  int ret;
4176 
4177  if (!*duration) {
4178  *duration = tmp;
4179  return tmp_time_base;
4180  }
4181 
4182  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4183  if (ret < 0) {
4184  *duration = tmp;
4185  return tmp_time_base;
4186  }
4187 
4188  return time_base;
4189 }
4190 
/* Rewind an input file for -stream_loop: seek back to its start time, then
 * estimate the file's total duration (including the last frame, whose length
 * is derived from audio sample counts when audio is present, else from the
 * frame rate) so timestamps can be offset on the next pass; finally count
 * down the remaining loops.
 * NOTE(review): doc-extraction dropped the signature (orig line 4191,
 * `static int seek_to_start(InputFile *ifile, AVFormatContext *is)`) —
 * restore before compiling. */
4192 {
4193  InputStream *ist;
4194  AVCodecContext *avctx;
4195  int i, ret, has_audio = 0;
4196  int64_t duration = 0;
4197 
4198  ret = av_seek_frame(is, -1, is->start_time, 0);
4199  if (ret < 0)
4200  return ret;
4201 
/* First pass: find out whether any decoded audio samples exist. */
4202  for (i = 0; i < ifile->nb_streams; i++) {
4203  ist = input_streams[ifile->ist_index + i];
4204  avctx = ist->dec_ctx;
4205 
4206  /* duration is the length of the last frame in a stream
4207  * when audio stream is present we don't care about
4208  * last video frame length because it's not defined exactly */
4209  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4210  has_audio = 1;
4211  }
4212 
4213  for (i = 0; i < ifile->nb_streams; i++) {
4214  ist = input_streams[ifile->ist_index + i];
4215  avctx = ist->dec_ctx;
4216 
4217  if (has_audio) {
4218  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4219  AVRational sample_rate = {1, avctx->sample_rate};
4220 
4221  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4222  } else {
4223  continue;
4224  }
4225  } else {
/* No audio: last-frame length from -framerate, avg_frame_rate, or 1 tick. */
4226  if (ist->framerate.num) {
4227  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4228  } else if (ist->st->avg_frame_rate.num) {
4229  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4230  } else {
4231  duration = 1;
4232  }
4233  }
4234  if (!ifile->duration)
4235  ifile->time_base = ist->st->time_base;
4236  /* the total duration of the stream, max_pts - min_pts is
4237  * the duration of the stream without the last frame */
4238  duration += ist->max_pts - ist->min_pts;
4239  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4240  ifile->time_base);
4241  }
4242 
4243  if (ifile->loop > 0)
4244  ifile->loop--;
4245 
4246  return ret;
4247 }
4248 
4249 /*
4250  * Return
4251  * - 0 -- one packet was read and processed
4252  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4253  * this function should be called again
4254  * - AVERROR_EOF -- this function should not be called again
4255  */
4256 static int process_input(int file_index)
4257 {
4258  InputFile *ifile = input_files[file_index];
4260  InputStream *ist;
4261  AVPacket pkt;
4262  int ret, thread_ret, i, j;
4263  int64_t duration;
4264  int64_t pkt_dts;
4265 
4266  is = ifile->ctx;
4267  ret = get_input_packet(ifile, &pkt);
4268 
4269  if (ret == AVERROR(EAGAIN)) {
4270  ifile->eagain = 1;
4271  return ret;
4272  }
4273  if (ret < 0 && ifile->loop) {
4274  AVCodecContext *avctx;
4275  for (i = 0; i < ifile->nb_streams; i++) {
4276  ist = input_streams[ifile->ist_index + i];
4277  avctx = ist->dec_ctx;
4278  if (ist->decoding_needed) {
4279  ret = process_input_packet(ist, NULL, 1);
4280  if (ret>0)
4281  return 0;
4282  avcodec_flush_buffers(avctx);
4283  }
4284  }
4285 #if HAVE_THREADS
4286  free_input_thread(file_index);
4287 #endif
4288  ret = seek_to_start(ifile, is);
4289 #if HAVE_THREADS
4290  thread_ret = init_input_thread(file_index);
4291  if (thread_ret < 0)
4292  return thread_ret;
4293 #endif
4294  if (ret < 0)
4295  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4296  else
4297  ret = get_input_packet(ifile, &pkt);
4298  if (ret == AVERROR(EAGAIN)) {
4299  ifile->eagain = 1;
4300  return ret;
4301  }
4302  }
4303  if (ret < 0) {
4304  if (ret != AVERROR_EOF) {
4305  print_error(is->url, ret);
4306  if (exit_on_error)
4307  exit_program(1);
4308  }
4309 
4310  for (i = 0; i < ifile->nb_streams; i++) {
4311  ist = input_streams[ifile->ist_index + i];
4312  if (ist->decoding_needed) {
4313  ret = process_input_packet(ist, NULL, 0);
4314  if (ret>0)
4315  return 0;
4316  }
4317 
4318  /* mark all outputs that don't go through lavfi as finished */
4319  for (j = 0; j < nb_output_streams; j++) {
4320  OutputStream *ost = output_streams[j];
4321 
4322  if (ost->source_index == ifile->ist_index + i &&
4323  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4324  finish_output_stream(ost);
4325  }
4326  }
4327 
4328  ifile->eof_reached = 1;
4329  return AVERROR(EAGAIN);
4330  }
4331 
4332  reset_eagain();
4333 
4334  if (do_pkt_dump) {
4336  is->streams[pkt.stream_index]);
4337  }
4338  /* the following test is needed in case new streams appear
4339  dynamically in stream : we ignore them */
4340  if (pkt.stream_index >= ifile->nb_streams) {
4341  report_new_stream(file_index, &pkt);
4342  goto discard_packet;
4343  }
4344 
4345  ist = input_streams[ifile->ist_index + pkt.stream_index];
4346 
4347  ist->data_size += pkt.size;
4348  ist->nb_packets++;
4349 
4350  if (ist->discard)
4351  goto discard_packet;
4352 
4353  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4355  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4356  if (exit_on_error)
4357  exit_program(1);
4358  }
4359 
4360  if (debug_ts) {
4361  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4362  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4366  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4367  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4368  av_ts2str(input_files[ist->file_index]->ts_offset),
4369  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4370  }
4371 
4372  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4373  int64_t stime, stime2;
4374  // Correcting starttime based on the enabled streams
4375  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4376  // so we instead do it here as part of discontinuity handling
4377  if ( ist->next_dts == AV_NOPTS_VALUE
4378  && ifile->ts_offset == -is->start_time
4379  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4380  int64_t new_start_time = INT64_MAX;
4381  for (i=0; i<is->nb_streams; i++) {
4382  AVStream *st = is->streams[i];
4383  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4384  continue;
4385  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4386  }
4387  if (new_start_time > is->start_time) {
4388  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4389  ifile->ts_offset = -new_start_time;
4390  }
4391  }
4392 
4393  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4394  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4395  ist->wrap_correction_done = 1;
4396 
4397  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4398  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4399  ist->wrap_correction_done = 0;
4400  }
4401  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4402  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4403  ist->wrap_correction_done = 0;
4404  }
4405  }
4406 
4407  /* add the stream-global side data to the first packet */
4408  if (ist->nb_packets == 1) {
4409  for (i = 0; i < ist->st->nb_side_data; i++) {
4410  AVPacketSideData *src_sd = &ist->st->side_data[i];
4411  uint8_t *dst_data;
4412 
4413  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4414  continue;
4415 
4416  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4417  continue;
4418 
4419  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4420  if (!dst_data)
4421  exit_program(1);
4422 
4423  memcpy(dst_data, src_sd->data, src_sd->size);
4424  }
4425  }
4426 
4427  if (pkt.dts != AV_NOPTS_VALUE)
4428  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4429  if (pkt.pts != AV_NOPTS_VALUE)
4430  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4431 
4432  if (pkt.pts != AV_NOPTS_VALUE)
4433  pkt.pts *= ist->ts_scale;
4434  if (pkt.dts != AV_NOPTS_VALUE)
4435  pkt.dts *= ist->ts_scale;
4436 
4438  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4440  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4441  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4442  int64_t delta = pkt_dts - ifile->last_ts;
4443  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4444  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4445  ifile->ts_offset -= delta;
4447  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4448  delta, ifile->ts_offset);
4449  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4450  if (pkt.pts != AV_NOPTS_VALUE)
4451  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4452  }
4453  }
4454 
4455  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4456  if (pkt.pts != AV_NOPTS_VALUE) {
4457  pkt.pts += duration;
4458  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4459  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4460  }
4461 
4462  if (pkt.dts != AV_NOPTS_VALUE)
4463  pkt.dts += duration;
4464 
4466  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4468  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4469  !copy_ts) {
4470  int64_t delta = pkt_dts - ist->next_dts;
4471  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4472  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4473  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4474  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4475  ifile->ts_offset -= delta;
4477  "timestamp discontinuity for stream #%d:%d "
4478  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4479  ist->file_index, ist->st->index, ist->st->id,
4481  delta, ifile->ts_offset);
4482  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4483  if (pkt.pts != AV_NOPTS_VALUE)
4484  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4485  }
4486  } else {
4487  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4488  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4489  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4490  pkt.dts = AV_NOPTS_VALUE;
4491  }
4492  if (pkt.pts != AV_NOPTS_VALUE){
4493  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4494  delta = pkt_pts - ist->next_dts;
4495  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4496  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4497  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4498  pkt.pts = AV_NOPTS_VALUE;
4499  }
4500  }
4501  }
4502  }
4503 
4504  if (pkt.dts != AV_NOPTS_VALUE)
4505  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4506 
4507  if (debug_ts) {
4508  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4510  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4511  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4512  av_ts2str(input_files[ist->file_index]->ts_offset),
4513  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4514  }
4515 
4516  sub2video_heartbeat(ist, pkt.pts);
4517 
4518  process_input_packet(ist, &pkt, 0);
4519 
4520 discard_packet:
4521  av_packet_unref(&pkt);
4522 
4523  return 0;
4524 }
4525 
4526 /**
4527  * Perform a step of transcoding for the specified filter graph.
4528  *
4529  * @param[in] graph filter graph to consider
4530  * @param[out] best_ist input stream where a frame would allow to continue
4531  * @return 0 for success, <0 for error
4532  */
4533 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4534 {
4535  int i, ret;
4536  int nb_requests, nb_requests_max = 0;
4537  InputFilter *ifilter;
4538  InputStream *ist;
4539 
4540  *best_ist = NULL;
4541  ret = avfilter_graph_request_oldest(graph->graph);
4542  if (ret >= 0)
4543  return reap_filters(0);
4544 
4545  if (ret == AVERROR_EOF) {
4546  ret = reap_filters(1);
4547  for (i = 0; i < graph->nb_outputs; i++)
4548  close_output_stream(graph->outputs[i]->ost);
4549  return ret;
4550  }
4551  if (ret != AVERROR(EAGAIN))
4552  return ret;
4553 
4554  for (i = 0; i < graph->nb_inputs; i++) {
4555  ifilter = graph->inputs[i];
4556  ist = ifilter->ist;
4557  if (input_files[ist->file_index]->eagain ||
4558  input_files[ist->file_index]->eof_reached)
4559  continue;
4560  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);