FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* (Re)allocate the sub2video canvas frame as a fully transparent/blank
 * buffer sized from the decoder (falling back to the configured
 * sub2video.w/h), returning 0 on success or a negative AVERROR.
 * NOTE(review): this listing lost original lines 176 and 184 in extraction —
 * presumably the signature (static int sub2video_get_blank_frame(InputStream
 * *ist)) and the pixel-format assignment for the frame; confirm against
 * upstream ffmpeg.c. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
 /* clear only the first plane; with a 32-bit packed format that is the
    whole image — assumes a packed pixel format, TODO confirm */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the sub2video canvas frame, stamped with the given pts, into every
 * buffersrc filter fed by this input stream; the pts is also recorded as
 * sub2video.last_pts so heartbeats know how far the canvas has been pushed.
 * NOTE(review): this listing lost original lines 232-233 in extraction —
 * the flags argument of av_buffersrc_add_frame_flags(); presumably
 * AV_BUFFERSRC_FLAG_KEEP_REF so the canvas frame remains owned by us and
 * can be re-sent — confirm against upstream ffmpeg.c. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
 /* the canvas must have been allocated by sub2video_get_blank_frame() */
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Render an AVSubtitle (or a blank canvas when sub is NULL) onto a fresh
 * sub2video frame and push it into the filters; updates sub2video.end_pts
 * to the time at which the subtitle stops being displayed.
 * NOTE(review): this listing lost original lines 240 and 262 in extraction —
 * presumably the signature (static void sub2video_update(InputStream *ist,
 * AVSubtitle *sub)) and the head of the av_log() error call; confirm
 * against upstream ffmpeg.c. */
241 {
242  AVFrame *frame = ist->sub2video.frame;
 /* NOTE(review): dst is int8_t* but sub2video_copy_rect takes uint8_t* —
    implicit signedness conversion, works but worth confirming upstream */
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
 /* display times are in ms relative to sub->pts (in AV_TIME_BASE units) */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
 /* NULL sub: emit a blank canvas starting where the last subtitle ended */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Restore the terminal attributes saved by term_init(); kept minimal so it
 * is safe to call from a signal handler (tcsetattr is async-signal-safe
 * per POSIX). No-op when the tty was never modified. */
320 static void term_exit_sigsafe(void)
321 {
322 #if HAVE_TERMIOS_H
323  if(restore_tty)
324  tcsetattr (0, TCSANOW, &oldtty);
325 #endif
326 }
327 
/* Public terminal teardown: silence the logger (flushing any pending
 * progress line) and restore the tty state.
 * NOTE(review): original line 331 appears elided by extraction — presumably
 * a call to term_exit_sigsafe(); confirm against upstream ffmpeg.c. */
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
/* Signal handler: records which signal arrived so the main loop can shut
 * down gracefully; after more than 3 signals it reports via an
 * async-signal-safe write() to stderr and hard-exits with status 123.
 * NOTE(review): original lines 341 and 345-346 appear elided by extraction —
 * presumably the parameter list "sigterm_handler(int sig)", the
 * received_nb_signals increment and a term_exit_sigsafe() call; confirm
 * against upstream ffmpeg.c. */
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
 /* write() result deliberately ignored — nothing useful can be done here */
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Put the controlling terminal into a raw-ish mode so read_key() can read
 * single keypresses, remember the original settings for term_exit(), and
 * install the signal / console-control handlers.
 * NOTE(review): original line 390 appears elided by extraction — presumably
 * "if (!run_as_daemon) {", which the otherwise-unmatched closing brace
 * before the #endif belongs to; confirm against upstream ffmpeg.c. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
 /* raw input: disable break/parity/CR-NL translation and XON/XOFF */
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
 /* no echo, non-canonical (byte-at-a-time) input */
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
 /* read() returns after 1 byte with no inter-byte timeout */
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, -1 when no input is available (or on a closed
 * input pipe), or the non-1 result of read() on a short/failed read. */
static int read_key(void)
{
    unsigned char key;
#if HAVE_TERMIOS_H
    struct timeval timeout = { 0, 0 };
    fd_set readable;
    int rc;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    rc = select(1, &readable, NULL, NULL, &timeout);
    if (rc > 0) {
        rc = read(0, &key, 1);
        return rc == 1 ? key : rc;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &key, 1);
        return key;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
/* AVIOInterruptCallback for libavformat: a nonzero return aborts blocking
 * I/O operations.
 * NOTE(review): original line 478 (the return expression) appears elided by
 * extraction — presumably it returns nonzero once a termination signal has
 * been received after transcode initialization; confirm against upstream
 * ffmpeg.c. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown, registered to run at program exit: drains and frees all
 * filtergraphs, output files/streams, input files/streams, the vstats file
 * and option state, then reports how the run ended.
 * NOTE(review): numerous original lines (e.g. 494, 506, 510, 512, 531, 536,
 * 542, 557, 562, 566, 572-573, 581, 596-600, 605, 612, 616, 625) appear
 * elided by extraction — additional frees and the declarations they used
 * (e.g. the AVFormatContext *s of each output file); confirm against
 * upstream ffmpeg.c before relying on this listing. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
 /* drain queued frames/subtitles from every filtergraph input, then free
    the graph inputs, outputs and description */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
498  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499  sizeof(frame), NULL);
500  av_frame_free(&frame);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
527  av_freep(&filtergraphs[i]);
528  }
529  av_freep(&filtergraphs);
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
545  av_freep(&output_files[i]);
546  }
547  for (i = 0; i < nb_output_streams; i++) {
548  OutputStream *ost = output_streams[i];
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570  av_dict_free(&ost->swr_opts);
571 
574 
 /* unreference any packets still waiting for the muxer header */
575  if (ost->muxing_queue) {
576  while (av_fifo_size(ost->muxing_queue)) {
577  AVPacket pkt;
578  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
579  av_packet_unref(&pkt);
580  }
582  }
583 
584  av_freep(&output_streams[i]);
585  }
586 #if HAVE_THREADS
587  free_input_threads();
588 #endif
589  for (i = 0; i < nb_input_files; i++) {
590  avformat_close_input(&input_files[i]->ctx);
591  av_freep(&input_files[i]);
592  }
593  for (i = 0; i < nb_input_streams; i++) {
594  InputStream *ist = input_streams[i];
595 
598  av_dict_free(&ist->decoder_opts);
601  av_freep(&ist->filters);
602  av_freep(&ist->hwaccel_device);
603  av_freep(&ist->dts_buffer);
604 
606 
607  av_freep(&input_streams[i]);
608  }
609 
610  if (vstats_file) {
611  if (fclose(vstats_file))
613  "Error closing vstats file, loss of information possible: %s\n",
614  av_err2str(AVERROR(errno)));
615  }
617 
618  av_freep(&input_streams);
619  av_freep(&input_files);
620  av_freep(&output_streams);
621  av_freep(&output_files);
622 
623  uninit_opts();
624 
626 
 /* report the way the run ended */
627  if (received_sigterm) {
628  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629  (int) received_sigterm);
630  } else if (ret && atomic_load(&transcode_init_done)) {
631  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
632  }
633  term_exit();
634  ffmpeg_exited = 1;
635 }
636 
/* Remove from one dictionary every key that appears in another.
 * NOTE(review): the signature line (637) and the loop body line (642) are
 * missing from this listing — upstream this is remove_avoptions(AVDictionary
 * **a, AVDictionary *b) with the body deleting t->key from *a; confirm
 * against upstream ffmpeg.c. */
638 {
639  AVDictionaryEntry *t = NULL;
640 
 /* iterate every entry of b (empty key + IGNORE_SUFFIX matches all) */
641  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
643  }
644 }
645 
/* Abort the program if any option in the dictionary was left unconsumed by
 * the (de)muxer/codec — an unrecognized option is a fatal user error.
 * NOTE(review): the signature line (646) and the declaration line (648) are
 * missing from this listing — upstream this is assert_avoptions(AVDictionary
 * *m) with AVDictionaryEntry *t; confirm against upstream ffmpeg.c. */
647 {
649  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
651  exit_program(1);
652  }
653 }
654 
/* Fatal bail-out used when an experimental codec is requested without the
 * required strictness level; both parameters are unused in the visible body
 * — the function simply terminates the program with status 1. */
655 static void abort_codec_experimental(AVCodec *c, int encoder)
656 {
657  exit_program(1);
658 }
659 
/* With -benchmark_all, log the user/sys/real time consumed since the
 * previous call, labelled by the printf-style fmt arguments, and refresh the
 * global current_time reference point.
 * NOTE(review): original lines 663 and 671 appear elided by extraction —
 * presumably "BenchmarkTimeStamps t = get_benchmark_time_stamps();" and the
 * head of the av_log() call; confirm against upstream ffmpeg.c. */
660 static void update_benchmark(const char *fmt, ...)
661 {
662  if (do_benchmark_all) {
664  va_list va;
665  char buf[1024];
666 
 /* a NULL fmt only resets the reference timestamps without logging */
667  if (fmt) {
668  va_start(va, fmt);
669  vsnprintf(buf, sizeof(buf), fmt, va);
670  va_end(va);
672  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
673  t.user_usec - current_time.user_usec,
674  t.sys_usec - current_time.sys_usec,
675  t.real_usec - current_time.real_usec, buf);
676  }
677  current_time = t;
678  }
679 }
680 
/* OR finish-flags into every output stream: the stream equal to ost gets
 * this_stream, every other stream gets others.
 * NOTE(review): the signature line (681) is missing from this listing —
 * upstream: static void close_all_output_streams(OutputStream *ost,
 * OSTFinished this_stream, OSTFinished others); confirm. */
682 {
683  int i;
684  for (i = 0; i < nb_output_streams; i++) {
685  OutputStream *ost2 = output_streams[i];
686  ost2->finished |= ost == ost2 ? this_stream : others;
687  }
688 }
689 
/* Hand one packet to the muxer for the given output stream: enforces
 * -frames limits, buffers packets while the muxer header is not yet
 * written, fixes up timestamps (invalid and non-monotonous DTS), collects
 * statistics and finally interleave-writes the packet. Consumes pkt.
 * NOTE(review): several original lines appear elided by extraction — 736-737
 * (presumably the VSYNC_DROP pts/dts-clearing condition), 742 (the
 * av_packet_get_side_data() call defining sd), 776 (the "else if(" head of
 * the DTS-monotonicity check), 809 (the media-type string argument of the
 * debug av_log) and 820 (presumably closing all output streams on error
 * unless -xerror); confirm against upstream ffmpeg.c. */
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 {
692  AVFormatContext *s = of->ctx;
693  AVStream *st = ost->st;
694  int ret;
695 
696  /*
697  * Audio encoders may split the packets -- #frames in != #packets out.
698  * But there is no reordering, so we can limit the number of output packets
699  * by simply dropping them here.
700  * Counting encoded video frames needs to be done separately because of
701  * reordering, see do_video_out().
702  * Do not count the packet when unqueued because it has been counted when queued.
703  */
704  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
705  if (ost->frame_number >= ost->max_frames) {
706  av_packet_unref(pkt);
707  return;
708  }
709  ost->frame_number++;
710  }
711 
712  if (!of->header_written) {
713  AVPacket tmp_pkt = {0};
714  /* the muxer is not initialized yet, buffer the packet */
715  if (!av_fifo_space(ost->muxing_queue)) {
 /* grow the queue up to max_muxing_queue_size; beyond that it is fatal */
716  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717  ost->max_muxing_queue_size);
718  if (new_size <= av_fifo_size(ost->muxing_queue)) {
720  "Too many packets buffered for output stream %d:%d.\n",
721  ost->file_index, ost->st->index);
722  exit_program(1);
723  }
724  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
725  if (ret < 0)
726  exit_program(1);
727  }
728  ret = av_packet_make_refcounted(pkt);
729  if (ret < 0)
730  exit_program(1);
731  av_packet_move_ref(&tmp_pkt, pkt);
732  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
733  return;
734  }
735 
738  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 
 /* for video, pull encoder quality/picture-type/error stats from the
    packet side data and optionally force CFR packet durations */
740  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741  int i;
743  NULL);
744  ost->quality = sd ? AV_RL32(sd) : -1;
745  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 
747  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748  if (sd && i < sd[5])
749  ost->error[i] = AV_RL64(sd + 8 + 8*i);
750  else
751  ost->error[i] = -1;
752  }
753 
754  if (ost->frame_rate.num && ost->is_cfr) {
755  if (pkt->duration > 0)
756  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
758  ost->mux_timebase);
759  }
760  }
761 
762  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 
764  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
 /* dts must never exceed pts; replace both by a median-style guess */
765  if (pkt->dts != AV_NOPTS_VALUE &&
766  pkt->pts != AV_NOPTS_VALUE &&
767  pkt->dts > pkt->pts) {
768  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769  pkt->dts, pkt->pts,
770  ost->file_index, ost->st->index);
 /* sum minus min minus max == the middle of the three candidates */
771  pkt->pts =
772  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775  }
777  pkt->dts != AV_NOPTS_VALUE &&
778  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779  ost->last_mux_dts != AV_NOPTS_VALUE) {
780  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781  if (pkt->dts < max) {
782  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783  av_log(s, loglevel, "Non-monotonous DTS in output stream "
784  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786  if (exit_on_error) {
787  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
788  exit_program(1);
789  }
790  av_log(s, loglevel, "changing to %"PRId64". This may result "
791  "in incorrect timestamps in the output file.\n",
792  max);
793  if (pkt->pts >= pkt->dts)
794  pkt->pts = FFMAX(pkt->pts, max);
795  pkt->dts = max;
796  }
797  }
798  }
799  ost->last_mux_dts = pkt->dts;
800 
801  ost->data_size += pkt->size;
802  ost->packets_written++;
803 
804  pkt->stream_index = ost->index;
805 
806  if (debug_ts) {
807  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
810  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
811  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
812  pkt->size
813  );
814  }
815 
816  ret = av_interleaved_write_frame(s, pkt);
817  if (ret < 0) {
818  print_error("av_interleaved_write_frame()", ret);
819  main_return_code = 1;
821  }
822  av_packet_unref(pkt);
823 }
824 
/* Mark this stream's encoder as finished; when the containing output file
 * uses -shortest, clamp the file's recording time to where this stream ends.
 * NOTE(review): the signature line (825) is missing from this listing —
 * upstream: static void close_output_stream(OutputStream *ost); confirm. */
826 {
827  OutputFile *of = output_files[ost->file_index];
828 
829  ost->finished |= ENCODER_FINISHED;
830  if (of->shortest) {
831  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
832  of->recording_time = FFMIN(of->recording_time, end);
833  }
834 }
835 
836 /*
837  * Send a single packet to the output, applying any bitstream filters
838  * associated with the output stream. This may result in any number
839  * of packets actually being written, depending on what bitstream
840  * filters are applied. The supplied packet is consumed and will be
841  * blank (as if newly-allocated) when this function returns.
842  *
843  * If eof is set, instead indicate EOF to all bitstream filters and
844  * therefore flush any delayed packets to the output. A blank packet
845  * must be supplied in this case.
846  */
/* Run a packet (or, with eof set, an EOF indication) through the stream's
 * bitstream-filter chain and write every resulting packet to the muxer; see
 * the descriptive comment block above this function for the full contract.
 * NOTE(review): the first signature line (847) is missing from this
 * listing — upstream: static void output_packet(OutputFile *of, AVPacket
 * *pkt, OutputStream *ost, int eof); confirm. */
848  OutputStream *ost, int eof)
849 {
850  int ret = 0;
851 
852  /* apply the output bitstream filters, if any */
853  if (ost->nb_bitstream_filters) {
 /* idx is the position in the chain we are currently draining from;
    the loop walks up (idx--) on EAGAIN and down (idx++) to feed the
    next filter, terminating when idx reaches 0 */
854  int idx;
855 
856  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
857  if (ret < 0)
858  goto finish;
859 
860  eof = 0;
861  idx = 1;
862  while (idx) {
863  /* get a packet from the previous filter up the chain */
864  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
865  if (ret == AVERROR(EAGAIN)) {
866  ret = 0;
867  idx--;
868  continue;
869  } else if (ret == AVERROR_EOF) {
870  eof = 1;
871  } else if (ret < 0)
872  goto finish;
873 
874  /* send it to the next filter down the chain or to the muxer */
875  if (idx < ost->nb_bitstream_filters) {
876  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
877  if (ret < 0)
878  goto finish;
879  idx++;
880  eof = 0;
881  } else if (eof)
882  goto finish;
883  else
884  write_packet(of, pkt, ost, 0);
885  }
886  } else if (!eof)
887  write_packet(of, pkt, ost, 0);
888 
889 finish:
890  if (ret < 0 && ret != AVERROR_EOF) {
891  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
893  if(exit_on_error)
894  exit_program(1);
895  }
896 }
897 
/* Return 1 while the stream is still inside the output file's -t recording
 * window; once the window is exceeded, close the stream and return 0.
 * NOTE(review): original lines 898 and 903 are missing from this listing —
 * presumably the signature (static int check_recording_time(OutputStream
 * *ost)) and the av_compare_ts() call comparing the stream position against
 * of->recording_time; confirm against upstream ffmpeg.c. */
899 {
900  OutputFile *of = output_files[ost->file_index];
901 
902  if (of->recording_time != INT64_MAX &&
904  AV_TIME_BASE_Q) >= 0) {
905  close_output_stream(ost);
906  return 0;
907  }
908  return 1;
909 }
910 
/* Encode one audio frame and write all packets the encoder produces.
 * Advances ost->sync_opts by the number of samples and keeps encode stats.
 * NOTE(review): original lines 911 and 932 are missing from this listing —
 * presumably the signature head (static void do_audio_out(OutputFile *of,
 * OutputStream *ost,) and a blank/continuation line; confirm against
 * upstream ffmpeg.c. */
912  AVFrame *frame)
913 {
914  AVCodecContext *enc = ost->enc_ctx;
915  AVPacket pkt;
916  int ret;
917 
918  av_init_packet(&pkt);
919  pkt.data = NULL;
920  pkt.size = 0;
921 
922  if (!check_recording_time(ost))
923  return;
924 
 /* without a usable pts (or with negative -async) follow our own clock */
925  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926  frame->pts = ost->sync_opts;
927  ost->sync_opts = frame->pts + frame->nb_samples;
928  ost->samples_encoded += frame->nb_samples;
929  ost->frames_encoded++;
930 
931  av_assert0(pkt.size || !pkt.data);
933  if (debug_ts) {
934  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937  enc->time_base.num, enc->time_base.den);
938  }
939 
940  ret = avcodec_send_frame(enc, frame);
941  if (ret < 0)
942  goto error;
943 
 /* drain every packet the encoder has ready; EAGAIN means it wants the
    next frame before producing more output */
944  while (1) {
945  ret = avcodec_receive_packet(enc, &pkt);
946  if (ret == AVERROR(EAGAIN))
947  break;
948  if (ret < 0)
949  goto error;
950 
951  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 
953  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
954 
955  if (debug_ts) {
956  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
958  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
959  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
960  }
961 
962  output_packet(of, &pkt, ost, 0);
963  }
964 
965  return;
966 error:
967  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
968  exit_program(1);
969 }
970 
/* Encode one AVSubtitle and emit the resulting packet(s). DVB subtitles are
 * encoded twice — once to draw and once (with num_rects temporarily forced
 * to 0) to clear — which is why the loop mutates and then restores fields
 * of *sub; the statement order here is deliberate and delicate. */
971 static void do_subtitle_out(OutputFile *of,
972  OutputStream *ost,
973  AVSubtitle *sub)
974 {
975  int subtitle_out_max_size = 1024 * 1024;
976  int subtitle_out_size, nb, i;
977  AVCodecContext *enc;
978  AVPacket pkt;
979  int64_t pts;
980 
981  if (sub->pts == AV_NOPTS_VALUE) {
982  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
983  if (exit_on_error)
984  exit_program(1);
985  return;
986  }
987 
988  enc = ost->enc_ctx;
989 
 /* lazily allocate the shared 1 MiB encode buffer (global, never freed) */
990  if (!subtitle_out) {
991  subtitle_out = av_malloc(subtitle_out_max_size);
992  if (!subtitle_out) {
993  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
994  exit_program(1);
995  }
996  }
997 
998  /* Note: DVB subtitle need one packet to draw them and one other
999  packet to clear them */
1000  /* XXX: signal it in the codec context ? */
1001  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1002  nb = 2;
1003  else
1004  nb = 1;
1005 
1006  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007  pts = sub->pts;
1008  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009  pts -= output_files[ost->file_index]->start_time;
1010  for (i = 0; i < nb; i++) {
1011  unsigned save_num_rects = sub->num_rects;
1012 
1013  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014  if (!check_recording_time(ost))
1015  return;
1016 
1017  sub->pts = pts;
1018  // start_display_time is required to be 0
1019  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020  sub->end_display_time -= sub->start_display_time;
1021  sub->start_display_time = 0;
 /* second (clearing) pass for DVB: encode with zero rectangles */
1022  if (i == 1)
1023  sub->num_rects = 0;
1024 
1025  ost->frames_encoded++;
1026 
1027  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028  subtitle_out_max_size, sub);
1029  if (i == 1)
1030  sub->num_rects = save_num_rects;
1031  if (subtitle_out_size < 0) {
1032  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1033  exit_program(1);
1034  }
1035 
1036  av_init_packet(&pkt);
1037  pkt.data = subtitle_out;
1038  pkt.size = subtitle_out_size;
1039  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042  /* XXX: the pts correction is handled here. Maybe handling
1043  it in the codec would be better */
 /* draw packet at display start, clear packet at display end */
1044  if (i == 0)
1045  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046  else
1047  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1048  }
1049  pkt.dts = pkt.pts;
1050  output_packet(of, &pkt, ost, 0);
1051  }
1052 }
1053 
1054 static void do_video_out(OutputFile *of,
1055  OutputStream *ost,
1056  AVFrame *next_picture,
1057  double sync_ipts)
1058 {
1059  int ret, format_video_sync;
1060  AVPacket pkt;
1061  AVCodecContext *enc = ost->enc_ctx;
1062  AVCodecParameters *mux_par = ost->st->codecpar;
1063  AVRational frame_rate;
1064  int nb_frames, nb0_frames, i;
1065  double delta, delta0;
1066  double duration = 0;
1067  int frame_size = 0;
1068  InputStream *ist = NULL;
1070 
1071  if (ost->source_index >= 0)
1072  ist = input_streams[ost->source_index];
1073 
1074  frame_rate = av_buffersink_get_frame_rate(filter);
1075  if (frame_rate.num > 0 && frame_rate.den > 0)
1076  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 
1078  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 
1081  if (!ost->filters_script &&
1082  !ost->filters &&
1083  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1084  next_picture &&
1085  ist &&
1086  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088  }
1089 
1090  if (!next_picture) {
1091  //end, flushing
1092  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093  ost->last_nb0_frames[1],
1094  ost->last_nb0_frames[2]);
1095  } else {
1096  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097  delta = delta0 + duration;
1098 
1099  /* by default, we output a single frame */
1100  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101  nb_frames = 1;
1102 
1103  format_video_sync = video_sync_method;
1104  if (format_video_sync == VSYNC_AUTO) {
1105  if(!strcmp(of->ctx->oformat->name, "avi")) {
1106  format_video_sync = VSYNC_VFR;
1107  } else
1108  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1109  if ( ist
1110  && format_video_sync == VSYNC_CFR
1111  && input_files[ist->file_index]->ctx->nb_streams == 1
1112  && input_files[ist->file_index]->input_ts_offset == 0) {
1113  format_video_sync = VSYNC_VSCFR;
1114  }
1115  if (format_video_sync == VSYNC_CFR && copy_ts) {
1116  format_video_sync = VSYNC_VSCFR;
1117  }
1118  }
1119  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1120 
1121  if (delta0 < 0 &&
1122  delta > 0 &&
1123  format_video_sync != VSYNC_PASSTHROUGH &&
1124  format_video_sync != VSYNC_DROP) {
1125  if (delta0 < -0.6) {
1126  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1127  } else
1128  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129  sync_ipts = ost->sync_opts;
1130  duration += delta0;
1131  delta0 = 0;
1132  }
1133 
1134  switch (format_video_sync) {
1135  case VSYNC_VSCFR:
1136  if (ost->frame_number == 0 && delta0 >= 0.5) {
1137  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138  delta = duration;
1139  delta0 = 0;
1140  ost->sync_opts = lrint(sync_ipts);
1141  }
1142  case VSYNC_CFR:
1143  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1145  nb_frames = 0;
1146  } else if (delta < -1.1)
1147  nb_frames = 0;
1148  else if (delta > 1.1) {
1149  nb_frames = lrintf(delta);
1150  if (delta0 > 1.1)
1151  nb0_frames = lrintf(delta0 - 0.6);
1152  }
1153  break;
1154  case VSYNC_VFR:
1155  if (delta <= -0.6)
1156  nb_frames = 0;
1157  else if (delta > 0.6)
1158  ost->sync_opts = lrint(sync_ipts);
1159  break;
1160  case VSYNC_DROP:
1161  case VSYNC_PASSTHROUGH:
1162  ost->sync_opts = lrint(sync_ipts);
1163  break;
1164  default:
1165  av_assert0(0);
1166  }
1167  }
1168 
1169  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170  nb0_frames = FFMIN(nb0_frames, nb_frames);
1171 
1172  memmove(ost->last_nb0_frames + 1,
1173  ost->last_nb0_frames,
1174  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175  ost->last_nb0_frames[0] = nb0_frames;
1176 
1177  if (nb0_frames == 0 && ost->last_dropped) {
1178  nb_frames_drop++;
1180  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181  ost->frame_number, ost->st->index, ost->last_frame->pts);
1182  }
1183  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184  if (nb_frames > dts_error_threshold * 30) {
1185  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1186  nb_frames_drop++;
1187  return;
1188  }
1189  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191  if (nb_frames_dup > dup_warning) {
1192  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1193  dup_warning *= 10;
1194  }
1195  }
1196  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1197 
1198  /* duplicates frame if needed */
1199  for (i = 0; i < nb_frames; i++) {
1200  AVFrame *in_picture;
1201  int forced_keyframe = 0;
1202  double pts_time;
1203  av_init_packet(&pkt);
1204  pkt.data = NULL;
1205  pkt.size = 0;
1206 
1207  if (i < nb0_frames && ost->last_frame) {
1208  in_picture = ost->last_frame;
1209  } else
1210  in_picture = next_picture;
1211 
1212  if (!in_picture)
1213  return;
1214 
1215  in_picture->pts = ost->sync_opts;
1216 
1217  if (!check_recording_time(ost))
1218  return;
1219 
1221  ost->top_field_first >= 0)
1222  in_picture->top_field_first = !!ost->top_field_first;
1223 
1224  if (in_picture->interlaced_frame) {
1225  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227  else
1228  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229  } else
1230  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 
1232  in_picture->quality = enc->global_quality;
1233  in_picture->pict_type = 0;
1234 
1235  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236  in_picture->pts != AV_NOPTS_VALUE)
1237  ost->forced_kf_ref_pts = in_picture->pts;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1264  }
1265 
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1
1270  && !i) {
1271  forced_keyframe = 1;
1272  }
1273 
1274  if (forced_keyframe) {
1275  in_picture->pict_type = AV_PICTURE_TYPE_I;
1276  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1277  }
1278 
1280  if (debug_ts) {
1281  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1282  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1283  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1284  enc->time_base.num, enc->time_base.den);
1285  }
1286 
1287  ost->frames_encoded++;
1288 
1289  ret = avcodec_send_frame(enc, in_picture);
1290  if (ret < 0)
1291  goto error;
1292  // Make sure Closed Captions will not be duplicated
1294 
1295  while (1) {
1296  ret = avcodec_receive_packet(enc, &pkt);
1297  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1298  if (ret == AVERROR(EAGAIN))
1299  break;
1300  if (ret < 0)
1301  goto error;
1302 
1303  if (debug_ts) {
1304  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1305  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1306  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1307  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1308  }
1309 
1310  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1311  pkt.pts = ost->sync_opts;
1312 
1313  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1314 
1315  if (debug_ts) {
1316  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1317  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1318  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1319  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1320  }
1321 
1322  frame_size = pkt.size;
1323  output_packet(of, &pkt, ost, 0);
1324 
1325  /* if two pass, output log */
1326  if (ost->logfile && enc->stats_out) {
1327  fprintf(ost->logfile, "%s", enc->stats_out);
1328  }
1329  }
1330  ost->sync_opts++;
1331  /*
1332  * For video, number of frames in == number of packets out.
1333  * But there may be reordering, so we can't throw away frames on encoder
1334  * flush, we need to limit them here, before they go into encoder.
1335  */
1336  ost->frame_number++;
1337 
1338  if (vstats_filename && frame_size)
1339  do_video_stats(ost, frame_size);
1340  }
1341 
1342  if (!ost->last_frame)
1343  ost->last_frame = av_frame_alloc();
1344  av_frame_unref(ost->last_frame);
1345  if (next_picture && ost->last_frame)
1346  av_frame_ref(ost->last_frame, next_picture);
1347  else
1348  av_frame_free(&ost->last_frame);
1349 
1350  return;
1351 error:
1352  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1353  exit_program(1);
1354 }
1355 
1356 static double psnr(double d)
1357 {
1358  return -10.0 * log10(d);
1359 }
1360 
/* Append one line of per-frame statistics for "ost" to the -vstats log
 * ("frame_size" is the encoded packet size in bytes).  The log file is
 * opened lazily on the first call and intentionally kept open for the
 * lifetime of the process.
 * NOTE(review): the signature line was lost in extraction and has been
 * restored from upstream — confirm against the repository. */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* -vstats_version 2 prefixes each line with output file/stream ids */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* error[0] is the accumulated squared error of the first plane;
         * normalize by pixel count * 255^2 before converting to dB */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;   /* clamp to avoid dividing by ~0 in avg_bitrate */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1403 
1404 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1405 
/* Mark "ost" as completely finished: nothing more will be encoded or
 * muxed for it.  If the owning output file was created with -shortest,
 * finishing any one of its streams finishes all of them.
 * NOTE(review): the signature line and the first "ost->finished" line
 * were lost in extraction and restored from upstream — confirm. */
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (of->shortest) {
        /* -shortest: ending one stream ends the whole output file */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1418 
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* the encoder may not be initialized yet if no frame reached this
         * stream before; initialization failure is fatal */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* lazily allocate the reusable frame that receives sink output */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        /* drain every frame already buffered in this sink */
        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NO_REQUEST: only take frames that are already buffered; do not
             * trigger any upstream filtering activity */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* on EOF during a flush, let the video path emit its
                     * final (possibly duplicated) frames */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* rescale in a finer timebase so float_pts keeps sub-tick
                 * precision; extra_bits is clipped so tb.den cannot overflow */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                /* unless the user forced an aspect ratio, propagate the
                 * filter output's SAR to the encoder */
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1528 
/* Print the end-of-run summary: per-media-type encoded byte counts and
 * muxing overhead at INFO level, then detailed per-stream demux/decode
 * and encode/mux counters at VERBOSE level.  "total_size" is the final
 * container size in bytes (<= 0 when unknown). */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate payload bytes per media type over all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the two lines below were lost in extraction and
         * restored from upstream — confirm against the repository.
         * They track whether this run looked like a pure first pass, which
         * softens the "output is empty" warning below. */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1645 
/* Print the one-line progress/status report and feed the -progress AVIO
 * target, if any.  Non-final reports are rate-limited to one update per
 * 500ms; the final report ("is_last_report") additionally prints the
 * end-of-run statistics via print_final_stats(). */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        /* throttle: at most one report every 500ms */
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the init of "buf" was lost in extraction and restored
     * from upstream — confirm against the repository. */
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams only report their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* "%3.*f": one decimal while fps < 9.95, none afterwards */
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* log-scaled histogram of the first 32 qp buckets, one hex
                 * digit per bucket */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes carry a quarter of the samples */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): the AV_NOPTS_VALUE guard below was lost in
         * extraction and restored from upstream — confirm. */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split the max end pts into sign / h:m:s.us components */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): the guard below was lost in extraction and restored
     * from upstream — confirm. */
    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        /* never write the terminating NUL */
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1844 
/* Seed an input filter's media parameters from the demuxer-level codec
 * parameters.  Used when no frame was ever decoded for this input, so
 * libavformat's values are the only ones available.
 * NOTE(review): the signature line was lost in extraction and restored
 * from upstream — confirm against the repository. */
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
{
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1857 
/* Drain every encoder that was actually used: send the NULL (flush)
 * frame and mux all packets the encoders still have buffered.  Streams
 * that never received data get a late best-effort initialization first. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* the graph was never configured because no input arrived;
                 * fall back to demuxer-level parameters for each input */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the guard below was lost in extraction and
                 * restored from upstream — confirm. */
                if (!ifilter_has_all_input_formats(fg))
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* raw/PCM-style audio encoders (frame_size <= 1) buffer nothing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        /* NOTE(review): the media-type guard below was lost in extraction
         * and restored from upstream — confirm. */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            update_benchmark(NULL);

            /* keep feeding the drain frame until the encoder yields a
             * packet or signals EOF */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* two-pass: append the encoder's rate-control stats */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* encoder fully drained: flush bitstream filters/muxer */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1973 
/*
 * Check whether a packet from ist should be written into ost at this time
 */
/* Returns 1 when the packet may be written, 0 when it must be skipped.
 * NOTE(review): the signature line was lost in extraction and restored
 * from upstream — confirm against the repository. */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* ost must be mapped from exactly this input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* drop packets that precede the requested output start time (-ss) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1993 
/* Copy one packet from "ist" to "ost" without re-encoding (-c copy),
 * rescaling its timestamps into the muxer timebase.  A NULL "pkt" means
 * EOF and only flushes the output bitstream filters. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt;

    // EOF: flush output bitstream filters.
    if (!pkt) {
        av_init_packet(&opkt);
        opkt.data = NULL;
        opkt.size = 0;
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* without -copyinkf, drop leading non-keyframe packets.
     * NOTE(review): the "!ost->copy_initial_nonkeyframes)" line was lost
     * in extraction and restored from upstream — confirm. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* without -copypriorss, drop packets before the output start time */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* stop once the output recording time (-t) is reached */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* same for a per-input-file recording limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (av_packet_ref(&opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the "duration" declaration below was lost in
         * extraction and restored from upstream — confirm. */
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        /* av_rescale_delta() carries rounding error across calls so
         * sample-accurate audio timestamps do not drift */
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    output_packet(of, &opkt, ost, 0);
}
2070 
/* If the decoder reported no channel layout, derive a default one from
 * the channel count (unless the count exceeds -guess_layout_max).
 * Returns 1 when a usable layout is known afterwards, 0 otherwise.
 * NOTE(review): the signature line and the av_get_default_channel_layout
 * call were lost in extraction and restored from upstream — confirm. */
static int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        if (dec->channels > ist->guess_layout_max)
            return 0;
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2090 
/* Update the global decode-error statistics after a decode attempt and
 * abort on error or corruption when -xerror is set.  "got_output" tells
 * whether the decoder produced a frame; "ret" is its return code. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;   /* index 0: successes, 1: failures */

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
        /* NOTE(review): the corruption check below was partially lost in
         * extraction and restored from upstream — confirm. */
        if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
                   "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
            if (exit_on_error)
                exit_program(1);
        }
    }
}
2108 
2109 // Filters can be configured only if the formats of all inputs are known.
2111 {
2112  int i;
2113  for (i = 0; i < fg->nb_inputs; i++) {
2114  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2115  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2116  return 0;
2117  }
2118  return 1;
2119 }
2120 
/* Feed one decoded frame into its input filter, (re)configuring the
 * filtergraph first when the frame's parameters differ from what the
 * graph was built for.  Frames arriving before all graph inputs know
 * their format are buffered in the input's FIFO.  Returns 0 on success
 * or a negative AVERROR.
 * NOTE(review): the signature line was lost in extraction and restored
 * from upstream — confirm against the repository. */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* a change of hardware frames context also forces a reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* cannot configure yet: park a clone of the frame in this
                 * input's FIFO until all inputs know their format */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* flush frames still pending in the old graph before reconfiguring */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the av_buffersrc_add_frame_flags call was lost in
     * extraction and restored from upstream — confirm. */
    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2197 
2198 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2199 {
2200  int ret;
2201 
2202  ifilter->eof = 1;
2203 
2204  if (ifilter->filter) {
2205  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2206  if (ret < 0)
2207  return ret;
2208  } else {
2209  // the filtergraph was never configured
2210  if (ifilter->format < 0)
2211  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2212  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2213  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2214  return AVERROR_INVALIDDATA;
2215  }
2216  }
2217 
2218  return 0;
2219 }
2220 
2221 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2222 // There is the following difference: if you got a frame, you must call
2223 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2224 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2225 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2226 {
2227  int ret;
2228 
2229  *got_frame = 0;
2230 
2231  if (pkt) {
2232  ret = avcodec_send_packet(avctx, pkt);
2233  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2234  // decoded frames with avcodec_receive_frame() until done.
2235  if (ret < 0 && ret != AVERROR_EOF)
2236  return ret;
2237  }
2238 
2239  ret = avcodec_receive_frame(avctx, frame);
2240  if (ret < 0 && ret != AVERROR(EAGAIN))
2241  return ret;
2242  if (ret >= 0)
2243  *got_frame = 1;
2244 
2245  return 0;
2246 }
2247 
2248 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2249 {
2250  int i, ret;
2251  AVFrame *f;
2252 
2253  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2254  for (i = 0; i < ist->nb_filters; i++) {
2255  if (i < ist->nb_filters - 1) {
2256  f = ist->filter_frame;
2257  ret = av_frame_ref(f, decoded_frame);
2258  if (ret < 0)
2259  break;
2260  } else
2261  f = decoded_frame;
2262  ret = ifilter_send_frame(ist->filters[i], f);
2263  if (ret == AVERROR_EOF)
2264  ret = 0; /* ignore */
2265  if (ret < 0) {
2267  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2268  break;
2269  }
2270  }
2271  return ret;
2272 }
2273 
2274 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2275  int *decode_failed)
2276 {
2277  AVFrame *decoded_frame;
2278  AVCodecContext *avctx = ist->dec_ctx;
2279  int ret, err = 0;
2280  AVRational decoded_frame_tb;
2281 
2282  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2283  return AVERROR(ENOMEM);
2284  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2285  return AVERROR(ENOMEM);
2286  decoded_frame = ist->decoded_frame;
2287 
2289  ret = decode(avctx, decoded_frame, got_output, pkt);
2290  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2291  if (ret < 0)
2292  *decode_failed = 1;
2293 
2294  if (ret >= 0 && avctx->sample_rate <= 0) {
2295  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2296  ret = AVERROR_INVALIDDATA;
2297  }
2298 
2299  if (ret != AVERROR_EOF)
2300  check_decode_result(ist, got_output, ret);
2301 
2302  if (!*got_output || ret < 0)
2303  return ret;
2304 
2305  ist->samples_decoded += decoded_frame->nb_samples;
2306  ist->frames_decoded++;
2307 
2308  /* increment next_dts to use for the case where the input stream does not
2309  have timestamps or there are multiple frames in the packet */
2310  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2311  avctx->sample_rate;
2312  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2313  avctx->sample_rate;
2314 
2315  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2316  decoded_frame_tb = ist->st->time_base;
2317  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2318  decoded_frame->pts = pkt->pts;
2319  decoded_frame_tb = ist->st->time_base;
2320  }else {
2321  decoded_frame->pts = ist->dts;
2322  decoded_frame_tb = AV_TIME_BASE_Q;
2323  }
2324  if (decoded_frame->pts != AV_NOPTS_VALUE)
2325  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2326  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2327  (AVRational){1, avctx->sample_rate});
2328  ist->nb_samples = decoded_frame->nb_samples;
2329  err = send_frame_to_filters(ist, decoded_frame);
2330 
2332  av_frame_unref(decoded_frame);
2333  return err < 0 ? err : ret;
2334 }
2335 
2336 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2337  int *decode_failed)
2338 {
2339  AVFrame *decoded_frame;
2340  int i, ret = 0, err = 0;
2341  int64_t best_effort_timestamp;
2342  int64_t dts = AV_NOPTS_VALUE;
2343  AVPacket avpkt;
2344 
2345  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2346  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2347  // skip the packet.
2348  if (!eof && pkt && pkt->size == 0)
2349  return 0;
2350 
2351  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2352  return AVERROR(ENOMEM);
2353  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2354  return AVERROR(ENOMEM);
2355  decoded_frame = ist->decoded_frame;
2356  if (ist->dts != AV_NOPTS_VALUE)
2357  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2358  if (pkt) {
2359  avpkt = *pkt;
2360  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2361  }
2362 
2363  // The old code used to set dts on the drain packet, which does not work
2364  // with the new API anymore.
2365  if (eof) {
2366  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2367  if (!new)
2368  return AVERROR(ENOMEM);
2369  ist->dts_buffer = new;
2370  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2371  }
2372 
2374  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2375  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2376  if (ret < 0)
2377  *decode_failed = 1;
2378 
2379  // The following line may be required in some cases where there is no parser
2380  // or the parser does not has_b_frames correctly
2381  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2382  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2383  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2384  } else
2386  "video_delay is larger in decoder than demuxer %d > %d.\n"
2387  "If you want to help, upload a sample "
2388  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2389  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2390  ist->dec_ctx->has_b_frames,
2391  ist->st->codecpar->video_delay);
2392  }
2393 
2394  if (ret != AVERROR_EOF)
2395  check_decode_result(ist, got_output, ret);
2396 
2397  if (*got_output && ret >= 0) {
2398  if (ist->dec_ctx->width != decoded_frame->width ||
2399  ist->dec_ctx->height != decoded_frame->height ||
2400  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2401  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2402  decoded_frame->width,
2403  decoded_frame->height,
2404  decoded_frame->format,
2405  ist->dec_ctx->width,
2406  ist->dec_ctx->height,
2407  ist->dec_ctx->pix_fmt);
2408  }
2409  }
2410 
2411  if (!*got_output || ret < 0)
2412  return ret;
2413 
2414  if(ist->top_field_first>=0)
2415  decoded_frame->top_field_first = ist->top_field_first;
2416 
2417  ist->frames_decoded++;
2418 
2419  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2420  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2421  if (err < 0)
2422  goto fail;
2423  }
2424  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2425 
2426  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2427  *duration_pts = decoded_frame->pkt_duration;
2428 
2429  if (ist->framerate.num)
2430  best_effort_timestamp = ist->cfr_next_pts++;
2431 
2432  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2433  best_effort_timestamp = ist->dts_buffer[0];
2434 
2435  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2436  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2437  ist->nb_dts_buffer--;
2438  }
2439 
2440  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2441  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2442 
2443  if (ts != AV_NOPTS_VALUE)
2444  ist->next_pts = ist->pts = ts;
2445  }
2446 
2447  if (debug_ts) {
2448  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2449  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2450  ist->st->index, av_ts2str(decoded_frame->pts),
2451  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2452  best_effort_timestamp,
2453  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2454  decoded_frame->key_frame, decoded_frame->pict_type,
2455  ist->st->time_base.num, ist->st->time_base.den);
2456  }
2457 
2458  if (ist->st->sample_aspect_ratio.num)
2459  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2460 
2461  err = send_frame_to_filters(ist, decoded_frame);
2462 
2463 fail:
2465  av_frame_unref(decoded_frame);
2466  return err < 0 ? err : ret;
2467 }
2468 
2469 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2470  int *decode_failed)
2471 {
2472  AVSubtitle subtitle;
2473  int free_sub = 1;
2474  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2475  &subtitle, got_output, pkt);
2476 
2477  check_decode_result(NULL, got_output, ret);
2478 
2479  if (ret < 0 || !*got_output) {
2480  *decode_failed = 1;
2481  if (!pkt->size)
2482  sub2video_flush(ist);
2483  return ret;
2484  }
2485 
2486  if (ist->fix_sub_duration) {
2487  int end = 1;
2488  if (ist->prev_sub.got_output) {
2489  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2490  1000, AV_TIME_BASE);
2491  if (end < ist->prev_sub.subtitle.end_display_time) {
2492  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2493  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2495  end <= 0 ? ", dropping it" : "");
2497  }
2498  }
2499  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2500  FFSWAP(int, ret, ist->prev_sub.ret);
2501  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2502  if (end <= 0)
2503  goto out;
2504  }
2505 
2506  if (!*got_output)
2507  return ret;
2508 
2509  if (ist->sub2video.frame) {
2510  sub2video_update(ist, &subtitle);
2511  } else if (ist->nb_filters) {
2512  if (!ist->sub2video.sub_queue)
2513  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2514  if (!ist->sub2video.sub_queue)
2515  exit_program(1);
2516  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2518  if (ret < 0)
2519  exit_program(1);
2520  }
2521  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2522  free_sub = 0;
2523  }
2524 
2525  if (!subtitle.num_rects)
2526  goto out;
2527 
2528  ist->frames_decoded++;
2529 
2530  for (i = 0; i < nb_output_streams; i++) {
2531  OutputStream *ost = output_streams[i];
2532 
2533  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2534  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2535  continue;
2536 
2537  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2538  }
2539 
2540 out:
2541  if (free_sub)
2542  avsubtitle_free(&subtitle);
2543  return ret;
2544 }
2545 
2547 {
2548  int i, ret;
2549  /* TODO keep pts also in stream time base to avoid converting back */
2550  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2552 
2553  for (i = 0; i < ist->nb_filters; i++) {
2554  ret = ifilter_send_eof(ist->filters[i], pts);
2555  if (ret < 0)
2556  return ret;
2557  }
2558  return 0;
2559 }
2560 
2561 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2562 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2563 {
2564  int ret = 0, i;
2565  int repeating = 0;
2566  int eof_reached = 0;
2567 
2568  AVPacket avpkt;
2569  if (!ist->saw_first_ts) {
2570  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2571  ist->pts = 0;
2572  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2573  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2574  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2575  }
2576  ist->saw_first_ts = 1;
2577  }
2578 
2579  if (ist->next_dts == AV_NOPTS_VALUE)
2580  ist->next_dts = ist->dts;
2581  if (ist->next_pts == AV_NOPTS_VALUE)
2582  ist->next_pts = ist->pts;
2583 
2584  if (!pkt) {
2585  /* EOF handling */
2586  av_init_packet(&avpkt);
2587  avpkt.data = NULL;
2588  avpkt.size = 0;
2589  } else {
2590  avpkt = *pkt;
2591  }
2592 
2593  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2594  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2595  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2596  ist->next_pts = ist->pts = ist->dts;
2597  }
2598 
2599  // while we have more to decode or while the decoder did output something on EOF
2600  while (ist->decoding_needed) {
2601  int64_t duration_dts = 0;
2602  int64_t duration_pts = 0;
2603  int got_output = 0;
2604  int decode_failed = 0;
2605 
2606  ist->pts = ist->next_pts;
2607  ist->dts = ist->next_dts;
2608 
2609  switch (ist->dec_ctx->codec_type) {
2610  case AVMEDIA_TYPE_AUDIO:
2611  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2612  &decode_failed);
2613  break;
2614  case AVMEDIA_TYPE_VIDEO:
2615  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2616  &decode_failed);
2617  if (!repeating || !pkt || got_output) {
2618  if (pkt && pkt->duration) {
2619  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2620  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2622  duration_dts = ((int64_t)AV_TIME_BASE *
2623  ist->dec_ctx->framerate.den * ticks) /
2625  }
2626 
2627  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2628  ist->next_dts += duration_dts;
2629  }else
2630  ist->next_dts = AV_NOPTS_VALUE;
2631  }
2632 
2633  if (got_output) {
2634  if (duration_pts > 0) {
2635  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2636  } else {
2637  ist->next_pts += duration_dts;
2638  }
2639  }
2640  break;
2641  case AVMEDIA_TYPE_SUBTITLE:
2642  if (repeating)
2643  break;
2644  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2645  if (!pkt && ret >= 0)
2646  ret = AVERROR_EOF;
2647  break;
2648  default:
2649  return -1;
2650  }
2651 
2652  if (ret == AVERROR_EOF) {
2653  eof_reached = 1;
2654  break;
2655  }
2656 
2657  if (ret < 0) {
2658  if (decode_failed) {
2659  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2660  ist->file_index, ist->st->index, av_err2str(ret));
2661  } else {
2662  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2663  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2664  }
2665  if (!decode_failed || exit_on_error)
2666  exit_program(1);
2667  break;
2668  }
2669 
2670  if (got_output)
2671  ist->got_output = 1;
2672 
2673  if (!got_output)
2674  break;
2675 
2676  // During draining, we might get multiple output frames in this loop.
2677  // ffmpeg.c does not drain the filter chain on configuration changes,
2678  // which means if we send multiple frames at once to the filters, and
2679  // one of those frames changes configuration, the buffered frames will
2680  // be lost. This can upset certain FATE tests.
2681  // Decode only 1 frame per call on EOF to appease these FATE tests.
2682  // The ideal solution would be to rewrite decoding to use the new
2683  // decoding API in a better way.
2684  if (!pkt)
2685  break;
2686 
2687  repeating = 1;
2688  }
2689 
2690  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2691  /* except when looping we need to flush but not to send an EOF */
2692  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2693  int ret = send_filter_eof(ist);
2694  if (ret < 0) {
2695  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2696  exit_program(1);
2697  }
2698  }
2699 
2700  /* handle stream copy */
2701  if (!ist->decoding_needed && pkt) {
2702  ist->dts = ist->next_dts;
2703  switch (ist->dec_ctx->codec_type) {
2704  case AVMEDIA_TYPE_AUDIO:
2705  av_assert1(pkt->duration >= 0);
2706  if (ist->dec_ctx->sample_rate) {
2707  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2708  ist->dec_ctx->sample_rate;
2709  } else {
2710  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2711  }
2712  break;
2713  case AVMEDIA_TYPE_VIDEO:
2714  if (ist->framerate.num) {
2715  // TODO: Remove work-around for c99-to-c89 issue 7
2716  AVRational time_base_q = AV_TIME_BASE_Q;
2717  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2718  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2719  } else if (pkt->duration) {
2720  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721  } else if(ist->dec_ctx->framerate.num != 0) {
2722  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2723  ist->next_dts += ((int64_t)AV_TIME_BASE *
2724  ist->dec_ctx->framerate.den * ticks) /
2726  }
2727  break;
2728  }
2729  ist->pts = ist->dts;
2730  ist->next_pts = ist->next_dts;
2731  }
2732  for (i = 0; i < nb_output_streams; i++) {
2733  OutputStream *ost = output_streams[i];
2734 
2735  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2736  continue;
2737 
2738  do_streamcopy(ist, ost, pkt);
2739  }
2740 
2741  return !eof_reached;
2742 }
2743 
2744 static void print_sdp(void)
2745 {
2746  char sdp[16384];
2747  int i;
2748  int j;
2749  AVIOContext *sdp_pb;
2750  AVFormatContext **avc;
2751 
2752  for (i = 0; i < nb_output_files; i++) {
2753  if (!output_files[i]->header_written)
2754  return;
2755  }
2756 
2757  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2758  if (!avc)
2759  exit_program(1);
2760  for (i = 0, j = 0; i < nb_output_files; i++) {
2761  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2762  avc[j] = output_files[i]->ctx;
2763  j++;
2764  }
2765  }
2766 
2767  if (!j)
2768  goto fail;
2769 
2770  av_sdp_create(avc, j, sdp, sizeof(sdp));
2771 
2772  if (!sdp_filename) {
2773  printf("SDP:\n%s\n", sdp);
2774  fflush(stdout);
2775  } else {
2776  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2777  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2778  } else {
2779  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2780  avio_closep(&sdp_pb);
2782  }
2783  }
2784 
2785 fail:
2786  av_freep(&avc);
2787 }
2788 
2790 {
2791  InputStream *ist = s->opaque;
2792  const enum AVPixelFormat *p;
2793  int ret;
2794 
2795  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2797  const AVCodecHWConfig *config = NULL;
2798  int i;
2799 
2800  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2801  break;
2802 
2803  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2804  ist->hwaccel_id == HWACCEL_AUTO) {
2805  for (i = 0;; i++) {
2806  config = avcodec_get_hw_config(s->codec, i);
2807  if (!config)
2808  break;
2809  if (!(config->methods &
2811  continue;
2812  if (config->pix_fmt == *p)
2813  break;
2814  }
2815  }
2816  if (config) {
2817  if (config->device_type != ist->hwaccel_device_type) {
2818  // Different hwaccel offered, ignore.
2819  continue;
2820  }
2821 
2822  ret = hwaccel_decode_init(s);
2823  if (ret < 0) {
2824  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2826  "%s hwaccel requested for input stream #%d:%d, "
2827  "but cannot be initialized.\n",
2829  ist->file_index, ist->st->index);
2830  return AV_PIX_FMT_NONE;
2831  }
2832  continue;
2833  }
2834  } else {
2835  const HWAccel *hwaccel = NULL;
2836  int i;
2837  for (i = 0; hwaccels[i].name; i++) {
2838  if (hwaccels[i].pix_fmt == *p) {
2839  hwaccel = &hwaccels[i];
2840  break;
2841  }
2842  }
2843  if (!hwaccel) {
2844  // No hwaccel supporting this pixfmt.
2845  continue;
2846  }
2847  if (hwaccel->id != ist->hwaccel_id) {
2848  // Does not match requested hwaccel.
2849  continue;
2850  }
2851 
2852  ret = hwaccel->init(s);
2853  if (ret < 0) {
2855  "%s hwaccel requested for input stream #%d:%d, "
2856  "but cannot be initialized.\n", hwaccel->name,
2857  ist->file_index, ist->st->index);
2858  return AV_PIX_FMT_NONE;
2859  }
2860  }
2861 
2862  if (ist->hw_frames_ctx) {
2864  if (!s->hw_frames_ctx)
2865  return AV_PIX_FMT_NONE;
2866  }
2867 
2868  ist->hwaccel_pix_fmt = *p;
2869  break;
2870  }
2871 
2872  return *p;
2873 }
2874 
2876 {
2877  InputStream *ist = s->opaque;
2878 
2879  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2880  return ist->hwaccel_get_buffer(s, frame, flags);
2881 
2882  return avcodec_default_get_buffer2(s, frame, flags);
2883 }
2884 
2885 static int init_input_stream(int ist_index, char *error, int error_len)
2886 {
2887  int ret;
2888  InputStream *ist = input_streams[ist_index];
2889 
2890  if (ist->decoding_needed) {
2891  AVCodec *codec = ist->dec;
2892  if (!codec) {
2893  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2894  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2895  return AVERROR(EINVAL);
2896  }
2897 
2898  ist->dec_ctx->opaque = ist;
2899  ist->dec_ctx->get_format = get_format;
2900  ist->dec_ctx->get_buffer2 = get_buffer;
2901  ist->dec_ctx->thread_safe_callbacks = 1;
2902 
2903  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2904  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2905  (ist->decoding_needed & DECODING_FOR_OST)) {
2906  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2908  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2909  }
2910 
2911  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2912 
2913  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2914  * audio, and video decoders such as cuvid or mediacodec */
2915  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2916 
2917  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2918  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2919  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2921  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2922 
2923  ret = hw_device_setup_for_decode(ist);
2924  if (ret < 0) {
2925  snprintf(error, error_len, "Device setup failed for "
2926  "decoder on input stream #%d:%d : %s",
2927  ist->file_index, ist->st->index, av_err2str(ret));
2928  return ret;
2929  }
2930 
2931  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2932  if (ret == AVERROR_EXPERIMENTAL)
2933  abort_codec_experimental(codec, 0);
2934 
2935  snprintf(error, error_len,
2936  "Error while opening decoder for input stream "
2937  "#%d:%d : %s",
2938  ist->file_index, ist->st->index, av_err2str(ret));
2939  return ret;
2940  }
2942  }
2943 
2944  ist->next_pts = AV_NOPTS_VALUE;
2945  ist->next_dts = AV_NOPTS_VALUE;
2946 
2947  return 0;
2948 }
2949 
2951 {
2952  if (ost->source_index >= 0)
2953  return input_streams[ost->source_index];
2954  return NULL;
2955 }
2956 
/* qsort() comparator for int64_t values: negative/zero/positive for
 * *a < *b, *a == *b, *a > *b — overflow-safe (no subtraction). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
2961 
2962 /* open the muxer when all the streams are initialized */
2963 static int check_init_output_file(OutputFile *of, int file_index)
2964 {
2965  int ret, i;
2966 
2967  for (i = 0; i < of->ctx->nb_streams; i++) {
2968  OutputStream *ost = output_streams[of->ost_index + i];
2969  if (!ost->initialized)
2970  return 0;
2971  }
2972 
2973  of->ctx->interrupt_callback = int_cb;
2974 
2975  ret = avformat_write_header(of->ctx, &of->opts);
2976  if (ret < 0) {
2978  "Could not write header for output file #%d "
2979  "(incorrect codec parameters ?): %s\n",
2980  file_index, av_err2str(ret));
2981  return ret;
2982  }
2983  //assert_avoptions(of->opts);
2984  of->header_written = 1;
2985 
2986  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2987 
2988  if (sdp_filename || want_sdp)
2989  print_sdp();
2990 
2991  /* flush the muxing queues */
2992  for (i = 0; i < of->ctx->nb_streams; i++) {
2993  OutputStream *ost = output_streams[of->ost_index + i];
2994 
2995  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2996  if (!av_fifo_size(ost->muxing_queue))
2997  ost->mux_timebase = ost->st->time_base;
2998 
2999  while (av_fifo_size(ost->muxing_queue)) {
3000  AVPacket pkt;
3001  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3002  write_packet(of, &pkt, ost, 1);
3003  }
3004  }
3005 
3006  return 0;
3007 }
3008 
3010 {
3011  AVBSFContext *ctx;
3012  int i, ret;
3013 
3014  if (!ost->nb_bitstream_filters)
3015  return 0;
3016 
3017  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3018  ctx = ost->bsf_ctx[i];
3019 
3020  ret = avcodec_parameters_copy(ctx->par_in,
3021  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3022  if (ret < 0)
3023  return ret;
3024 
3025  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3026 
3027  ret = av_bsf_init(ctx);
3028  if (ret < 0) {
3029  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3030  ost->bsf_ctx[i]->filter->name);
3031  return ret;
3032  }
3033  }
3034 
3035  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3036  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3037  if (ret < 0)
3038  return ret;
3039 
3040  ost->st->time_base = ctx->time_base_out;
3041 
3042  return 0;
3043 }
3044 
3046 {
3047  OutputFile *of = output_files[ost->file_index];
3048  InputStream *ist = get_input_stream(ost);
3049  AVCodecParameters *par_dst = ost->st->codecpar;
3050  AVCodecParameters *par_src = ost->ref_par;
3051  AVRational sar;
3052  int i, ret;
3053  uint32_t codec_tag = par_dst->codec_tag;
3054 
3055  av_assert0(ist && !ost->filter);
3056 
3057  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3058  if (ret >= 0)
3059  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3060  if (ret < 0) {
3062  "Error setting up codec context options.\n");
3063  return ret;
3064  }
3065 
3066  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3067  if (ret < 0) {
3069  "Error getting reference codec parameters.\n");
3070  return ret;
3071  }
3072 
3073  if (!codec_tag) {
3074  unsigned int codec_tag_tmp;
3075  if (!of->ctx->oformat->codec_tag ||
3076  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3077  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3078  codec_tag = par_src->codec_tag;
3079  }
3080 
3081  ret = avcodec_parameters_copy(par_dst, par_src);
3082  if (ret < 0)
3083  return ret;
3084 
3085  par_dst->codec_tag = codec_tag;
3086 
3087  if (!ost->frame_rate.num)
3088  ost->frame_rate = ist->framerate;
3089  ost->st->avg_frame_rate = ost->frame_rate;
3090 
3092  if (ret < 0)
3093  return ret;
3094 
3095  // copy timebase while removing common factors
3096  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3098 
3099  // copy estimated duration as a hint to the muxer
3100  if (ost->st->duration <= 0 && ist->st->duration > 0)
3101  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3102 
3103  // copy disposition
3104  ost->st->disposition = ist->st->disposition;
3105 
3106  if (ist->st->nb_side_data) {
3107  for (i = 0; i < ist->st->nb_side_data; i++) {
3108  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3109  uint8_t *dst_data;
3110 
3111  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3112  if (!dst_data)
3113  return AVERROR(ENOMEM);
3114  memcpy(dst_data, sd_src->data, sd_src->size);
3115  }
3116  }
3117 
3118  if (ost->rotate_overridden) {
3120  sizeof(int32_t) * 9);
3121  if (sd)
3123  }
3124 
3125  switch (par_dst->codec_type) {
3126  case AVMEDIA_TYPE_AUDIO:
3127  if (audio_volume != 256) {
3128  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3129  exit_program(1);
3130  }
3131  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3132  par_dst->block_align= 0;
3133  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3134  par_dst->block_align= 0;
3135  break;
3136  case AVMEDIA_TYPE_VIDEO:
3137  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3138  sar =
3140  (AVRational){ par_dst->height, par_dst->width });
3141  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3142  "with stream copy may produce invalid files\n");
3143  }
3144  else if (ist->st->sample_aspect_ratio.num)
3145  sar = ist->st->sample_aspect_ratio;
3146  else
3147  sar = par_src->sample_aspect_ratio;
3148  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3149  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3150  ost->st->r_frame_rate = ist->st->r_frame_rate;
3151  break;
3152  }
3153 
3154  ost->mux_timebase = ist->st->time_base;
3155 
3156  return 0;
3157 }
3158 
/* set_encoder_id — tag the output stream's metadata with an "encoder" entry
 * (the libavcodec ident plus the encoder name), unless the user already set
 * one, or just "Lavc <name>" when bitexact output was requested.
 * NOTE(review): the signature line (3159) was lost in extraction; in ffmpeg.c
 * this is `static void set_encoder_id(OutputFile *of, OutputStream *ost)` —
 * confirm against the upstream source. */
3160 {
3161  AVDictionaryEntry *e;
3162 
3163  uint8_t *encoder_string;
3164  int encoder_string_len;
3165  int format_flags = 0;
3166  int codec_flags = ost->enc_ctx->flags;
3167 
 /* a user-supplied "encoder" tag always wins */
3168  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3169  return;
3170 
 /* evaluate -fflags from the muxer options to detect bitexact mode */
3171  e = av_dict_get(of->opts, "fflags", NULL, 0);
3172  if (e) {
3173  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3174  if (!o)
3175  return;
3176  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3177  }
 /* likewise for the per-encoder "flags" option */
3178  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3179  if (e) {
3180  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3181  if (!o)
3182  return;
3183  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3184  }
3185 
 /* room for ident + ' ' separator + encoder name + NUL */
3186  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3187  encoder_string = av_mallocz(encoder_string_len);
3188  if (!encoder_string)
3189  exit_program(1);
3190 
3191  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3192  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3193  else
3194  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3195  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3196  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): continuation line 3197 (the av_dict_set flags argument,
  * presumably AV_DICT_DONT_STRDUP_VAL) was lost in extraction — confirm */
3198 }
3199 
/* Parse the -force_key_frames time list `kf` (comma separated) into a sorted
 * array of keyframe pts values (in avctx->time_base units) stored on ost.
 * A "chapters[+offset]" entry expands to one keyframe per chapter start of
 * the output file, shifted by the optional offset.  Fatal (exit_program) on
 * allocation failure. */
3200 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3201  AVCodecContext *avctx)
3202 {
3203  char *p;
3204  int n = 1, i, size, index = 0;
3205  int64_t t, *pts;
3206 
 /* count entries: commas + 1 */
3207  for (p = kf; *p; p++)
3208  if (*p == ',')
3209  n++;
3210  size = n;
3211  pts = av_malloc_array(size, sizeof(*pts));
3212  if (!pts) {
3213  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3214  exit_program(1);
3215  }
3216 
 /* walk the list, splitting in place at each comma */
3217  p = kf;
3218  for (i = 0; i < n; i++) {
3219  char *next = strchr(p, ',');
3220 
3221  if (next)
3222  *next++ = 0;
3223 
3224  if (!memcmp(p, "chapters", 8)) {
3225 
3226  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3227  int j;
3228 
 /* grow the array to hold one entry per chapter (replacing this one) */
3229  if (avf->nb_chapters > INT_MAX - size ||
3230  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3231  sizeof(*pts)))) {
 /* NOTE(review): line 3232 (the av_log(NULL, AV_LOG_FATAL, call opening
  * this message) was lost in extraction — confirm against ffmpeg.c */
3233  "Could not allocate forced key frames array.\n");
3234  exit_program(1);
3235  }
 /* optional "+offset" after the word "chapters" */
3236  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3237  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3238 
3239  for (j = 0; j < avf->nb_chapters; j++) {
3240  AVChapter *c = avf->chapters[j];
3241  av_assert1(index < size);
3242  pts[index++] = av_rescale_q(c->start, c->time_base,
3243  avctx->time_base) + t;
3244  }
3245 
3246  } else {
3247 
 /* plain timestamp entry */
3248  t = parse_time_or_die("force_key_frames", p, 1);
3249  av_assert1(index < size);
3250  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3251 
3252  }
3253 
3254  p = next;
3255  }
3256 
3257  av_assert0(index == size);
3258  qsort(pts, size, sizeof(*pts), compare_int64);
3259  ost->forced_kf_count = size;
3260  ost->forced_kf_pts = pts;
3261 }
3262 
3263 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3264 {
3265  InputStream *ist = get_input_stream(ost);
3266  AVCodecContext *enc_ctx = ost->enc_ctx;
3267  AVFormatContext *oc;
3268 
3269  if (ost->enc_timebase.num > 0) {
3270  enc_ctx->time_base = ost->enc_timebase;
3271  return;
3272  }
3273 
3274  if (ost->enc_timebase.num < 0) {
3275  if (ist) {
3276  enc_ctx->time_base = ist->st->time_base;
3277  return;
3278  }
3279 
3280  oc = output_files[ost->file_index]->ctx;
3281  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3282  }
3283 
3284  enc_ctx->time_base = default_time_base;
3285 }
3286 
/* init_output_stream_encode — fill the encoder context of an output stream
 * that will be encoded: frame rate selection, per-media-type parameters
 * (sample rate/format, width/height/SAR/pix_fmt from the filter sink,
 * subtitle dimensions) and the stream's mux timebase.
 * NOTE(review): the signature line (3287, `static int
 * init_output_stream_encode(OutputStream *ost)`) and the declaration of
 * `dec_ctx` (line 3291) were lost in extraction — confirm upstream. */
3288 {
3289  InputStream *ist = get_input_stream(ost);
3290  AVCodecContext *enc_ctx = ost->enc_ctx;
3292  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3293  int j, ret;
3294 
3295  set_encoder_id(output_files[ost->file_index], ost);
3296 
3297  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3298  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3299  // which have to be filtered out to prevent leaking them to output files.
3300  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3301 
3302  if (ist) {
 /* inherit disposition and chroma location from the decoded input */
3303  ost->st->disposition = ist->st->disposition;
3304 
3305  dec_ctx = ist->dec_ctx;
3306 
3307  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3308  } else {
 /* no direct input: see if another stream of this type exists */
3309  for (j = 0; j < oc->nb_streams; j++) {
3310  AVStream *st = oc->streams[j];
3311  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3312  break;
3313  }
3314  if (j == oc->nb_streams)
 /* NOTE(review): lines 3316-3317 (rest of this condition and its body)
  * were lost in extraction */
3315  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3318  }
3319 
 /* pick an output frame rate: user -r, filter graph, input -r,
  * r_frame_rate, then a 25 fps fallback (lines 3322/3329 lost) */
3320  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3321  if (!ost->frame_rate.num)
3323  if (ist && !ost->frame_rate.num)
3324  ost->frame_rate = ist->framerate;
3325  if (ist && !ost->frame_rate.num)
3326  ost->frame_rate = ist->st->r_frame_rate;
3327  if (ist && !ost->frame_rate.num) {
3328  ost->frame_rate = (AVRational){25, 1};
3330  "No information "
3331  "about the input framerate is available. Falling "
3332  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3333  "if you want a different framerate.\n",
3334  ost->file_index, ost->index);
3335  }
3336 
 /* snap to the encoder's supported frame rates unless -force_fps */
3337  if (ost->enc->supported_framerates && !ost->force_fps) {
3338  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3339  ost->frame_rate = ost->enc->supported_framerates[idx];
3340  }
3341  // reduce frame rate for mpeg4 to be within the spec limits
3342  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3343  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3344  ost->frame_rate.num, ost->frame_rate.den, 65535);
3345  }
3346  }
3347 
3348  switch (enc_ctx->codec_type) {
3349  case AVMEDIA_TYPE_AUDIO:
 /* NOTE(review): lines 3350 and 3354-3356 (sample_fmt/sample_rate/
  * channel setup from the filter sink) were lost in extraction */
3351  if (dec_ctx)
3352  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3353  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3357 
3358  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3359  break;
3360 
3361  case AVMEDIA_TYPE_VIDEO:
 /* NOTE(review): lines 3362, 3365, 3367 (timebase init and the rest of
  * this high-framerate warning condition) were lost in extraction */
3363 
3364  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3366  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3368  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3369  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3370  }
3371 
 /* geometry, SAR and pixel format come from the filter graph sink */
3372  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3373  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3374  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3375  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3376  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3378 
3379  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3380  if (dec_ctx)
3381  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3382  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3383 
3384  enc_ctx->framerate = ost->frame_rate;
3385 
3386  ost->st->avg_frame_rate = ost->frame_rate;
3387 
 /* NOTE(review): line 3392 (body of this "format changed" branch)
  * was lost in extraction */
3388  if (!dec_ctx ||
3389  enc_ctx->width != dec_ctx->width ||
3390  enc_ctx->height != dec_ctx->height ||
3391  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3393  }
3394 
 /* -top 0/1 forces bottom/top field first; -1 leaves it untouched */
3395  if (ost->top_field_first == 0) {
3396  enc_ctx->field_order = AV_FIELD_BB;
3397  } else if (ost->top_field_first == 1) {
3398  enc_ctx->field_order = AV_FIELD_TT;
3399  }
3400 
3401  if (ost->forced_keyframes) {
3402  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
 /* NOTE(review): lines 3403-3404 (av_expr_parse of the expression)
  * were lost in extraction */
3405  if (ret < 0) {
3407  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3408  return ret;
3409  }
 /* NOTE(review): lines 3410-3413 (forced_keyframes_expr_const_values
  * initialization) were lost in extraction */
3414 
3415  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416  // parse it only for static kf timings
3417  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
 /* NOTE(review): line 3418 (parse_forced_key_frames call) lost */
3419  }
3420  }
3421  break;
3422  case AVMEDIA_TYPE_SUBTITLE:
3423  enc_ctx->time_base = AV_TIME_BASE_Q;
3424  if (!enc_ctx->width) {
3425  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3427  }
3428  break;
3429  case AVMEDIA_TYPE_DATA:
3430  break;
3431  default:
3432  abort();
3433  break;
3434  }
3435 
3436  ost->mux_timebase = enc_ctx->time_base;
3437 
3438  return 0;
3439 }
3440 
/* Fully initialize one output stream: run the encode or streamcopy setup,
 * open the encoder, propagate side data and timebase/duration hints to the
 * muxer stream, apply a user -disposition override, set up bitstream
 * filters, and finally try to write the output file header once all of the
 * file's streams are initialized.  Returns 0 or a negative AVERROR; a
 * human-readable message is written into `error` on failure. */
3441 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3442 {
3443  int ret = 0;
3444 
3445  if (ost->encoding_needed) {
3446  AVCodec *codec = ost->enc;
3447  AVCodecContext *dec = NULL;
3448  InputStream *ist;
3449 
3450  ret = init_output_stream_encode(ost);
3451  if (ret < 0)
3452  return ret;
3453 
3454  if ((ist = get_input_stream(ost)))
3455  dec = ist->dec_ctx;
3456  if (dec && dec->subtitle_header) {
3457  /* ASS code assumes this buffer is null terminated so add extra byte. */
 /* NOTE(review): line 3458 (the av_mallocz of subtitle_header) and
  * line 3462 (subtitle_header_size assignment) were lost in extraction */
3459  if (!ost->enc_ctx->subtitle_header)
3460  return AVERROR(ENOMEM);
3461  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3463  }
 /* default to automatic threading unless the user chose otherwise */
3464  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3465  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* audio encoders without their own defaults get 128k as a bitrate */
3466  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3467  !codec->defaults &&
3468  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3469  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3470  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3471 
 /* NOTE(review): lines 3473-3475 (hw_frames_ctx propagation from the
  * filter sink) were lost in extraction */
3472  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3476  if (!ost->enc_ctx->hw_frames_ctx)
3477  return AVERROR(ENOMEM);
3478  } else {
3479  ret = hw_device_setup_for_encode(ost);
3480  if (ret < 0) {
3481  snprintf(error, error_len, "Device setup failed for "
3482  "encoder on output stream #%d:%d : %s",
3483  ost->file_index, ost->index, av_err2str(ret));
3484  return ret;
3485  }
3486  }
 /* subtitle transcoding is only text->text or bitmap->bitmap */
3487  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3488  int input_props = 0, output_props = 0;
 /* NOTE(review): lines 3490/3492 (avcodec_descriptor_get calls) lost */
3489  AVCodecDescriptor const *input_descriptor =
3491  AVCodecDescriptor const *output_descriptor =
3493  if (input_descriptor)
3494  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3495  if (output_descriptor)
3496  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3497  if (input_props && output_props && input_props != output_props) {
3498  snprintf(error, error_len,
3499  "Subtitle encoding currently only possible from text to text "
3500  "or bitmap to bitmap");
3501  return AVERROR_INVALIDDATA;
3502  }
3503  }
3504 
3505  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3506  if (ret == AVERROR_EXPERIMENTAL)
3507  abort_codec_experimental(codec, 1);
3508  snprintf(error, error_len,
3509  "Error while opening encoder for output stream #%d:%d - "
3510  "maybe incorrect parameters such as bit_rate, rate, width or height",
3511  ost->file_index, ost->index);
3512  return ret;
3513  }
 /* NOTE(review): lines 3516/3518 (av_buffersink_set_frame_size call)
  * were lost in extraction */
3514  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3515  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3517  ost->enc_ctx->frame_size);
3519  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3520  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3521  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3522  " It takes bits/s as argument, not kbits/s\n");
3523 
 /* NOTE(review): lines 3524/3526 (avcodec_parameters_from_context and
  * its av_log) were lost in extraction */
3525  if (ret < 0) {
3527  "Error initializing the output stream codec context.\n");
3528  exit_program(1);
3529  }
3530  /*
3531  * FIXME: ost->st->codec should't be needed here anymore.
3532  */
3533  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3534  if (ret < 0)
3535  return ret;
3536 
 /* mirror coded side data produced by the encoder onto the stream */
3537  if (ost->enc_ctx->nb_coded_side_data) {
3538  int i;
3539 
3540  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3541  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3542  uint8_t *dst_data;
3543 
3544  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3545  if (!dst_data)
3546  return AVERROR(ENOMEM);
3547  memcpy(dst_data, sd_src->data, sd_src->size);
3548  }
3549  }
3550 
3551  /*
3552  * Add global input side data. For now this is naive, and copies it
3553  * from the input stream's global side data. All side data should
3554  * really be funneled over AVFrame and libavfilter, then added back to
3555  * packet side data, and then potentially using the first packet for
3556  * global side data.
3557  */
3558  if (ist) {
3559  int i;
3560  for (i = 0; i < ist->st->nb_side_data; i++) {
3561  AVPacketSideData *sd = &ist->st->side_data[i];
3562  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3563  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3564  if (!dst)
3565  return AVERROR(ENOMEM);
3566  memcpy(dst, sd->data, sd->size);
 /* autorotation happens in the filter graph, so neutralize the matrix */
3567  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3568  av_display_rotation_set((uint32_t *)dst, 0);
3569  }
3570  }
3571  }
3572 
3573  // copy timebase while removing common factors
3574  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3575  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3576 
3577  // copy estimated duration as a hint to the muxer
3578  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3579  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3580 
3581  ost->st->codec->codec= ost->enc_ctx->codec;
3582  } else if (ost->stream_copy) {
3583  ret = init_output_stream_streamcopy(ost);
3584  if (ret < 0)
3585  return ret;
3586  }
3587 
3588  // parse user provided disposition, and update stream values
3589  if (ost->disposition) {
3590  static const AVOption opts[] = {
3591  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3592  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3593  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3594  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3595  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3596  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3597  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3598  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3599  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3600  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3601  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3602  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3603  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3604  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3605  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3606  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3607  { NULL },
3608  };
3609  static const AVClass class = {
3610  .class_name = "",
3611  .item_name = av_default_item_name,
3612  .option = opts,
3613  .version = LIBAVUTIL_VERSION_INT,
3614  };
3615  const AVClass *pclass = &class;
3616 
3617  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3618  if (ret < 0)
3619  return ret;
3620  }
3621 
3622  /* initialize bitstream filters for the output stream
3623  * needs to be done here, because the codec id for streamcopy is not
3624  * known until now */
3625  ret = init_output_bsfs(ost);
3626  if (ret < 0)
3627  return ret;
3628 
3629  ost->initialized = 1;
3630 
3631  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3632  if (ret < 0)
3633  return ret;
3634 
3635  return ret;
3636 }
3637 
/* Warn (once per stream index) when a packet arrives for an input stream
 * that appeared mid-file, i.e. was not present when the input was opened. */
3638 static void report_new_stream(int input_index, AVPacket *pkt)
3639 {
3640  InputFile *file = input_files[input_index];
3641  AVStream *st = file->ctx->streams[pkt->stream_index];
3642 
 /* already warned about this (or a later) stream index */
3643  if (pkt->stream_index < file->nb_streams_warn)
3644  return;
3645  av_log(file->ctx, AV_LOG_WARNING,
3646  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
 /* NOTE(review): line 3647 (the %s media-type argument, presumably
  * av_get_media_type_string(...)) was lost in extraction — confirm */
3648  input_index, pkt->stream_index,
3649  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3650  file->nb_streams_warn = pkt->stream_index + 1;
3651 }
3652 
/* One-time setup before the main transcode loop: resolve source streams for
 * complex-filtergraph outputs, arm -re frame-rate emulation, open decoders
 * and (non-filter-fed) encoders, discard unused programs, write headers for
 * stream-less outputs, and print the stream mapping.  Returns 0 or the
 * first initialization error. */
3653 static int transcode_init(void)
3654 {
3655  int ret = 0, i, j, k;
3656  AVFormatContext *oc;
3657  OutputStream *ost;
3658  InputStream *ist;
3659  char error[1024] = {0};
3660 
 /* attribute a source_index to outputs fed by single-input filtergraphs */
3661  for (i = 0; i < nb_filtergraphs; i++) {
3662  FilterGraph *fg = filtergraphs[i];
3663  for (j = 0; j < fg->nb_outputs; j++) {
3664  OutputFilter *ofilter = fg->outputs[j];
3665  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3666  continue;
3667  if (fg->nb_inputs != 1)
3668  continue;
3669  for (k = nb_input_streams-1; k >= 0 ; k--)
3670  if (fg->inputs[0]->ist == input_streams[k])
3671  break;
3672  ofilter->ost->source_index = k;
3673  }
3674  }
3675 
3676  /* init framerate emulation */
3677  for (i = 0; i < nb_input_files; i++) {
3678  InputFile *ifile = input_files[i];
3679  if (ifile->rate_emu)
3680  for (j = 0; j < ifile->nb_streams; j++)
3681  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3682  }
3683 
3684  /* init input streams */
3685  for (i = 0; i < nb_input_streams; i++)
3686  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* on failure, close any encoder contexts before bailing out */
3687  for (i = 0; i < nb_output_streams; i++) {
3688  ost = output_streams[i];
3689  avcodec_close(ost->enc_ctx);
3690  }
3691  goto dump_format;
3692  }
3693 
3694  /* open each encoder */
3695  for (i = 0; i < nb_output_streams; i++) {
3696  // skip streams fed from filtergraphs until we have a frame for them
3697  if (output_streams[i]->filter)
3698  continue;
3699 
3700  ret = init_output_stream(output_streams[i], error, sizeof(error));
3701  if (ret < 0)
3702  goto dump_format;
3703  }
3704 
3705  /* discard unused programs */
3706  for (i = 0; i < nb_input_files; i++) {
3707  InputFile *ifile = input_files[i];
3708  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3709  AVProgram *p = ifile->ctx->programs[j];
3710  int discard = AVDISCARD_ALL;
3711 
3712  for (k = 0; k < p->nb_stream_indexes; k++)
3713  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3714  discard = AVDISCARD_DEFAULT;
3715  break;
3716  }
3717  p->discard = discard;
3718  }
3719  }
3720 
3721  /* write headers for files with no streams */
3722  for (i = 0; i < nb_output_files; i++) {
3723  oc = output_files[i]->ctx;
3724  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3725  ret = check_init_output_file(output_files[i], i);
3726  if (ret < 0)
3727  goto dump_format;
3728  }
3729  }
3730 
3731  dump_format:
3732  /* dump the stream mapping */
3733  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3734  for (i = 0; i < nb_input_streams; i++) {
3735  ist = input_streams[i];
3736 
3737  for (j = 0; j < ist->nb_filters; j++) {
3738  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3739  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3740  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3741  ist->filters[j]->name);
3742  if (nb_filtergraphs > 1)
3743  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3744  av_log(NULL, AV_LOG_INFO, "\n");
3745  }
3746  }
3747  }
3748 
3749  for (i = 0; i < nb_output_streams; i++) {
3750  ost = output_streams[i];
3751 
3752  if (ost->attachment_filename) {
3753  /* an attached file */
3754  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3755  ost->attachment_filename, ost->file_index, ost->index);
3756  continue;
3757  }
3758 
3759  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3760  /* output from a complex graph */
3761  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3762  if (nb_filtergraphs > 1)
3763  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3764 
3765  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3766  ost->index, ost->enc ? ost->enc->name : "?");
3767  continue;
3768  }
3769 
3770  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3771  input_streams[ost->source_index]->file_index,
3772  input_streams[ost->source_index]->st->index,
3773  ost->file_index,
3774  ost->index);
3775  if (ost->sync_ist != input_streams[ost->source_index])
3776  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3777  ost->sync_ist->file_index,
3778  ost->sync_ist->st->index);
3779  if (ost->stream_copy)
3780  av_log(NULL, AV_LOG_INFO, " (copy)");
3781  else {
 /* report decoder/encoder names, collapsing "native" implementations */
3782  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3783  const AVCodec *out_codec = ost->enc;
3784  const char *decoder_name = "?";
3785  const char *in_codec_name = "?";
3786  const char *encoder_name = "?";
3787  const char *out_codec_name = "?";
3788  const AVCodecDescriptor *desc;
3789 
3790  if (in_codec) {
3791  decoder_name = in_codec->name;
3792  desc = avcodec_descriptor_get(in_codec->id);
3793  if (desc)
3794  in_codec_name = desc->name;
3795  if (!strcmp(decoder_name, in_codec_name))
3796  decoder_name = "native";
3797  }
3798 
3799  if (out_codec) {
3800  encoder_name = out_codec->name;
3801  desc = avcodec_descriptor_get(out_codec->id);
3802  if (desc)
3803  out_codec_name = desc->name;
3804  if (!strcmp(encoder_name, out_codec_name))
3805  encoder_name = "native";
3806  }
3807 
3808  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3809  in_codec_name, decoder_name,
3810  out_codec_name, encoder_name);
3811  }
3812  av_log(NULL, AV_LOG_INFO, "\n");
3813  }
3814 
3815  if (ret) {
3816  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3817  return ret;
3818  }
3819 
 /* NOTE(review): line 3820 (presumably the atomic store marking
  * transcode init as done) was lost in extraction — confirm upstream */
3821 
3822  return 0;
3823 }
3824 
3825 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3826 static int need_output(void)
3827 {
3828  int i;
3829 
3830  for (i = 0; i < nb_output_streams; i++) {
3831  OutputStream *ost = output_streams[i];
3832  OutputFile *of = output_files[ost->file_index];
3833  AVFormatContext *os = output_files[ost->file_index]->ctx;
3834 
3835  if (ost->finished ||
3836  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3837  continue;
3838  if (ost->frame_number >= ost->max_frames) {
3839  int j;
3840  for (j = 0; j < of->ctx->nb_streams; j++)
3841  close_output_stream(output_streams[of->ost_index + j]);
3842  continue;
3843  }
3844 
3845  return 1;
3846  }
3847 
3848  return 0;
3849 }
3850 
3851 /**
3852  * Select the output stream to process.
3853  *
3854  * @return selected output stream, or NULL if none available
3855  */
/* choose_output — pick the output stream whose muxed dts is furthest behind,
 * so outputs advance roughly in step.  Uninitialized streams that still have
 * input pending are returned immediately; streams currently flagged
 * unavailable yield NULL for this round.
 * NOTE(review): the signature line (3856, `static OutputStream
 * *choose_output(void)`) was lost in extraction — confirm upstream. */
3857 {
3858  int i;
3859  int64_t opts_min = INT64_MAX;
3860  OutputStream *ost_min = NULL;
3861 
3862  for (i = 0; i < nb_output_streams; i++) {
3863  OutputStream *ost = output_streams[i];
 /* unset dts sorts first (INT64_MIN) so the stream gets serviced */
3864  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3865  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3866  AV_TIME_BASE_Q);
3867  if (ost->st->cur_dts == AV_NOPTS_VALUE)
 /* NOTE(review): line 3868 (the av_log call opening this debug
  * message) was lost in extraction */
3869  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3870  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3871 
3872  if (!ost->initialized && !ost->inputs_done)
3873  return ost;
3874 
3875  if (!ost->finished && opts < opts_min) {
3876  opts_min = opts;
3877  ost_min = ost->unavailable ? NULL : ost;
3878  }
3879  }
3880  return ost_min;
3881 }
3882 
/* Turn terminal echo on (on != 0) or off for stdin where termios is
 * available; silently does nothing elsewhere or if stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3894 
/* Poll the console (at most every 100ms, never when daemonized) and act on
 * single-key commands: quit, verbosity, QP histogram, packet dumping,
 * sending/queuing filter commands, debug-level cycling, and help.
 * Returns AVERROR_EXIT to request shutdown, 0 otherwise. */
3895 static int check_keyboard_interaction(int64_t cur_time)
3896 {
3897  int i, ret, key;
3898  static int64_t last_time;
3899  if (received_nb_signals)
3900  return AVERROR_EXIT;
3901  /* read_key() returns 0 on EOF */
3902  if(cur_time - last_time >= 100000 && !run_as_daemon){
3903  key = read_key();
3904  last_time = cur_time;
3905  }else
3906  key = -1;
3907  if (key == 'q')
3908  return AVERROR_EXIT;
3909  if (key == '+') av_log_set_level(av_log_get_level()+10);
3910  if (key == '-') av_log_set_level(av_log_get_level()-10);
3911  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off */
3912  if (key == 'h'){
3913  if (do_hex_dump){
3914  do_hex_dump = do_pkt_dump = 0;
3915  } else if(do_pkt_dump){
3916  do_hex_dump = 1;
3917  } else
3918  do_pkt_dump = 1;
 /* NOTE(review): line 3919 (raising the log level for dumping)
  * was lost in extraction — confirm upstream */
3920  }
 /* 'c'/'C' prompt for a filter command and send (c) or queue (C) it */
3921  if (key == 'c' || key == 'C'){
3922  char buf[4096], target[64], command[256], arg[256] = {0};
3923  double time;
3924  int k, n = 0;
3925  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3926  i = 0;
3927  set_tty_echo(1);
3928  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3929  if (k > 0)
3930  buf[i++] = k;
3931  buf[i] = 0;
3932  set_tty_echo(0);
3933  fprintf(stderr, "\n");
3934  if (k > 0 &&
3935  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3936  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3937  target, time, command, arg);
3938  for (i = 0; i < nb_filtergraphs; i++) {
3939  FilterGraph *fg = filtergraphs[i];
3940  if (fg->graph) {
3941  if (time < 0) {
3942  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3943  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3944  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3945  } else if (key == 'c') {
3946  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3947  ret = AVERROR_PATCHWELCOME;
3948  } else {
3949  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3950  if (ret < 0)
3951  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3952  }
3953  }
3954  }
3955  } else {
 /* NOTE(review): line 3956 (the av_log call opening this error
  * message) was lost in extraction */
3957  "Parse error, at least 3 arguments were expected, "
3958  "only %d given in string '%s'\n", n, buf);
3959  }
3960  }
 /* 'D' cycles to the next supported debug bit; 'd' prompts for a value */
3961  if (key == 'd' || key == 'D'){
3962  int debug=0;
3963  if(key == 'D') {
3964  debug = input_streams[0]->st->codec->debug<<1;
3965  if(!debug) debug = 1;
3966  while(debug & (FF_DEBUG_DCT_COEFF
3967 #if FF_API_DEBUG_MV
3968  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3969 #endif
3970  )) //unsupported, would just crash
3971  debug += debug;
3972  }else{
3973  char buf[32];
3974  int k = 0;
3975  i = 0;
3976  set_tty_echo(1);
3977  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3978  if (k > 0)
3979  buf[i++] = k;
3980  buf[i] = 0;
3981  set_tty_echo(0);
3982  fprintf(stderr, "\n");
3983  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3984  fprintf(stderr,"error parsing debug value\n");
3985  }
 /* propagate the new debug flags to every codec context */
3986  for(i=0;i<nb_input_streams;i++) {
3987  input_streams[i]->st->codec->debug = debug;
3988  }
3989  for(i=0;i<nb_output_streams;i++) {
3990  OutputStream *ost = output_streams[i];
3991  ost->enc_ctx->debug = debug;
3992  }
3993  if(debug) av_log_set_level(AV_LOG_DEBUG);
3994  fprintf(stderr,"debug=%d\n", debug);
3995  }
3996  if (key == '?'){
3997  fprintf(stderr, "key function\n"
3998  "? show this help\n"
3999  "+ increase verbosity\n"
4000  "- decrease verbosity\n"
4001  "c Send command to first matching filter supporting it\n"
4002  "C Send/Queue command to all matching filters\n"
4003  "D cycle through available debug modes\n"
4004  "h dump packets/hex press to cycle through the 3 states\n"
4005  "q quit\n"
4006  "s Show QP histogram\n"
4007  );
4008  }
4009  return 0;
4010 }
4011 
4012 #if HAVE_THREADS
/* Per-input-file reader thread: demuxes packets and pushes them onto the
 * file's thread message queue until EOF or error.  On a full queue in
 * non-blocking mode it falls back to a blocking send (warning the user to
 * raise -thread_queue_size).  Errors are propagated to the receiving side
 * via av_thread_message_queue_set_err_recv(). */
4013 static void *input_thread(void *arg)
4014 {
4015  InputFile *f = arg;
4016  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4017  int ret = 0;
4018 
4019  while (1) {
4020  AVPacket pkt;
4021  ret = av_read_frame(f->ctx, &pkt);
4022 
 /* nothing available right now: back off briefly and retry */
4023  if (ret == AVERROR(EAGAIN)) {
4024  av_usleep(10000);
4025  continue;
4026  }
4027  if (ret < 0) {
4028  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4029  break;
4030  }
4031  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* queue full in non-blocking mode: switch to blocking and retry once */
4032  if (flags && ret == AVERROR(EAGAIN)) {
4033  flags = 0;
4034  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* NOTE(review): line 4035 (the av_log call opening this warning)
  * was lost in extraction */
4036  "Thread message queue blocking; consider raising the "
4037  "thread_queue_size option (current value: %d)\n",
4038  f->thread_queue_size);
4039  }
4040  if (ret < 0) {
4041  if (ret != AVERROR_EOF)
4042  av_log(f->ctx, AV_LOG_ERROR,
4043  "Unable to send packet to main thread: %s\n",
4044  av_err2str(ret));
4045  av_packet_unref(&pkt);
4046  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4047  break;
4048  }
4049  }
4050 
4051  return NULL;
4052 }
4053 
/* Shut down the reader thread of input file i: drain and unref any queued
 * packets, join the thread, and free the message queue.  No-op when the
 * file has no thread queue. */
4054 static void free_input_thread(int i)
4055 {
4056  InputFile *f = input_files[i];
4057  AVPacket pkt;
4058 
4059  if (!f || !f->in_thread_queue)
4060  return;
 /* NOTE(review): line 4061 (presumably
  * av_thread_message_queue_set_err_send(..., AVERROR_EOF) to unblock the
  * sender) was lost in extraction — confirm upstream */
4062  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4063  av_packet_unref(&pkt);
4064 
4065  pthread_join(f->thread, NULL);
4066  f->joined = 1;
4067  av_thread_message_queue_free(&f->in_thread_queue);
4068 }
4069 
4070 static void free_input_threads(void)
4071 {
4072  int i;
4073 
4074  for (i = 0; i < nb_input_files; i++)
4075  free_input_thread(i);
4076 }
4077 
4078 static int init_input_thread(int i)
4079 {
4080  int ret;
4081  InputFile *f = input_files[i];
4082 
4083  if (nb_input_files == 1)
4084  return 0;
4085 
4086  if (f->ctx->pb ? !f->ctx->pb->seekable :
4087  strcmp(f->ctx->iformat->name, "lavfi"))
4088  f->non_blocking = 1;
4089  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4090  f->thread_queue_size, sizeof(AVPacket));
4091  if (ret < 0)
4092  return ret;
4093 
4094  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4095  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4096  av_thread_message_queue_free(&f->in_thread_queue);
4097  return AVERROR(ret);
4098  }
4099 
4100  return 0;
4101 }
4102 
4103 static int init_input_threads(void)
4104 {
4105  int i, ret;
4106 
4107  for (i = 0; i < nb_input_files; i++) {
4108  ret = init_input_thread(i);
4109  if (ret < 0)
4110  return ret;
4111  }
4112  return 0;
4113 }
4114 
/* Fetch the next packet for f from its reader thread's message queue,
 * non-blocking when the file was flagged as such. */
4115 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4116 {
4117  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4118  f->non_blocking ?
 /* NOTE(review): line 4119 (presumably `AV_THREAD_MESSAGE_NONBLOCK : 0);`)
  * was lost in extraction — confirm upstream */
4120 }
4121 #endif
4122 
/* get_input_packet — read the next packet of an input file, honoring -re
 * rate emulation (returns EAGAIN while any stream of the file is ahead of
 * wall-clock time) and delegating to the per-file reader thread when more
 * than one input is open.
 * NOTE(review): the signature line (4123, `static int
 * get_input_packet(InputFile *f, AVPacket *pkt)`) was lost in extraction. */
4124 {
4125  if (f->rate_emu) {
4126  int i;
4127  for (i = 0; i < f->nb_streams; i++) {
4128  InputStream *ist = input_streams[f->ist_index + i];
 /* compare stream dts (in microseconds) with elapsed wall time */
4129  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4130  int64_t now = av_gettime_relative() - ist->start;
4131  if (pts > now)
4132  return AVERROR(EAGAIN);
4133  }
4134  }
4135 
4136 #if HAVE_THREADS
4137  if (nb_input_files > 1)
4138  return get_input_packet_mt(f, pkt);
4139 #endif
4140  return av_read_frame(f->ctx, pkt);
4141 }
4142 
4143 static int got_eagain(void)
4144 {
4145  int i;
4146  for (i = 0; i < nb_output_streams; i++)
4147  if (output_streams[i]->unavailable)
4148  return 1;
4149  return 0;
4150 }
4151 
4152 static void reset_eagain(void)
4153 {
4154  int i;
4155  for (i = 0; i < nb_input_files; i++)
4156  input_files[i]->eagain = 0;
4157  for (i = 0; i < nb_output_streams; i++)
4158  output_streams[i]->unavailable = 0;
4159 }
4160 
4161 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4162 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4163  AVRational time_base)
4164 {
4165  int ret;
4166 
4167  if (!*duration) {
4168  *duration = tmp;
4169  return tmp_time_base;
4170  }
4171 
4172  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4173  if (ret < 0) {
4174  *duration = tmp;
4175  return tmp_time_base;
4176  }
4177 
4178  return time_base;
4179 }
4180 
/* seek_to_start — rewind an input file for -stream_loop: seek back to the
 * start, then extend the recorded file duration by an estimate of the last
 * frame's length (audio sample count when audio is present, otherwise one
 * video frame interval), and decrement the remaining loop count.
 * NOTE(review): the signature line (4181, `static int seek_to_start(
 * InputFile *ifile, AVFormatContext *is)`) was lost in extraction. */
4182 {
4183  InputStream *ist;
4184  AVCodecContext *avctx;
4185  int i, ret, has_audio = 0;
4186  int64_t duration = 0;
4187 
4188  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4189  if (ret < 0)
4190  return ret;
4191 
 /* first pass: detect whether any audio stream produced samples */
4192  for (i = 0; i < ifile->nb_streams; i++) {
4193  ist = input_streams[ifile->ist_index + i];
4194  avctx = ist->dec_ctx;
4195 
4196  /* duration is the length of the last frame in a stream
4197  * when audio stream is present we don't care about
4198  * last video frame length because it's not defined exactly */
4199  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4200  has_audio = 1;
4201  }
4202 
 /* second pass: estimate the last-frame duration per stream and fold it
  * into the file duration via duration_max() */
4203  for (i = 0; i < ifile->nb_streams; i++) {
4204  ist = input_streams[ifile->ist_index + i];
4205  avctx = ist->dec_ctx;
4206 
4207  if (has_audio) {
4208  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4209  AVRational sample_rate = {1, avctx->sample_rate};
4210 
4211  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4212  } else {
4213  continue;
4214  }
4215  } else {
4216  if (ist->framerate.num) {
4217  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4218  } else if (ist->st->avg_frame_rate.num) {
4219  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4220  } else {
 /* no frame rate information: assume one timebase unit */
4221  duration = 1;
4222  }
4223  }
4224  if (!ifile->duration)
4225  ifile->time_base = ist->st->time_base;
4226  /* the total duration of the stream, max_pts - min_pts is
4227  * the duration of the stream without the last frame */
4228  duration += ist->max_pts - ist->min_pts;
4229  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4230  ifile->time_base);
4231  }
4232 
4233  if (ifile->loop > 0)
4234  ifile->loop--;
4235 
4236  return ret;
4237 }
4238 
4239 /*
4240  * Return
4241  * - 0 -- one packet was read and processed
4242  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4243  * this function should be called again
4244  * - AVERROR_EOF -- this function should not be called again
4245  */
4246 static int process_input(int file_index)
4247 {
4248  InputFile *ifile = input_files[file_index];
4250  InputStream *ist;
4251  AVPacket pkt;
4252  int ret, thread_ret, i, j;
4253  int64_t duration;
4254  int64_t pkt_dts;
4255 
4256  is = ifile->ctx;
4257  ret = get_input_packet(ifile, &pkt);
4258 
4259  if (ret == AVERROR(EAGAIN)) {
4260  ifile->eagain = 1;
4261  return ret;
4262  }
4263  if (ret < 0 && ifile->loop) {
4264  AVCodecContext *avctx;
4265  for (i = 0; i < ifile->nb_streams; i++) {
4266  ist = input_streams[ifile->ist_index + i];
4267  avctx = ist->dec_ctx;
4268  if (ist->decoding_needed) {
4269  ret = process_input_packet(ist, NULL, 1);
4270  if (ret>0)
4271  return 0;
4272  avcodec_flush_buffers(avctx);
4273  }
4274  }
4275 #if HAVE_THREADS
4276  free_input_thread(file_index);
4277 #endif
4278  ret = seek_to_start(ifile, is);
4279 #if HAVE_THREADS
4280  thread_ret = init_input_thread(file_index);
4281  if (thread_ret < 0)
4282  return thread_ret;
4283 #endif
4284  if (ret < 0)
4285  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4286  else
4287  ret = get_input_packet(ifile, &pkt);
4288  if (ret == AVERROR(EAGAIN)) {
4289  ifile->eagain = 1;
4290  return ret;
4291  }
4292  }
4293  if (ret < 0) {
4294  if (ret != AVERROR_EOF) {
4295  print_error(is->url, ret);
4296  if (exit_on_error)
4297  exit_program(1);
4298  }
4299 
4300  for (i = 0; i < ifile->nb_streams; i++) {
4301  ist = input_streams[ifile->ist_index + i];
4302  if (ist->decoding_needed) {
4303  ret = process_input_packet(ist, NULL, 0);
4304  if (ret>0)
4305  return 0;
4306  }
4307 
4308  /* mark all outputs that don't go through lavfi as finished */
4309  for (j = 0; j < nb_output_streams; j++) {
4310  OutputStream *ost = output_streams[j];
4311 
4312  if (ost->source_index == ifile->ist_index + i &&
4313  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4314  finish_output_stream(ost);
4315  }
4316  }
4317 
4318  ifile->eof_reached = 1;
4319  return AVERROR(EAGAIN);
4320  }
4321 
4322  reset_eagain();
4323 
4324  if (do_pkt_dump) {
4326  is->streams[pkt.stream_index]);
4327  }
4328  /* the following test is needed in case new streams appear
4329  dynamically in stream : we ignore them */
4330  if (pkt.stream_index >= ifile->nb_streams) {
4331  report_new_stream(file_index, &pkt);
4332  goto discard_packet;
4333  }
4334 
4335  ist = input_streams[ifile->ist_index + pkt.stream_index];
4336 
4337  ist->data_size += pkt.size;
4338  ist->nb_packets++;
4339 
4340  if (ist->discard)
4341  goto discard_packet;
4342 
4343  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4345  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4346  if (exit_on_error)
4347  exit_program(1);
4348  }
4349 
4350  if (debug_ts) {
4351  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4352  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4356  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4357  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4358  av_ts2str(input_files[ist->file_index]->ts_offset),
4359  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4360  }
4361 
4362  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4363  int64_t stime, stime2;
4364  // Correcting starttime based on the enabled streams
4365  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4366  // so we instead do it here as part of discontinuity handling
4367  if ( ist->next_dts == AV_NOPTS_VALUE
4368  && ifile->ts_offset == -is->start_time
4369  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4370  int64_t new_start_time = INT64_MAX;
4371  for (i=0; i<is->nb_streams; i++) {
4372  AVStream *st = is->streams[i];
4373  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4374  continue;
4375  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4376  }
4377  if (new_start_time > is->start_time) {
4378  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4379  ifile->ts_offset = -new_start_time;
4380  }
4381  }
4382 
4383  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4384  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4385  ist->wrap_correction_done = 1;
4386 
4387  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4388  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4389  ist->wrap_correction_done = 0;
4390  }
4391  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4392  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4393  ist->wrap_correction_done = 0;
4394  }
4395  }
4396 
4397  /* add the stream-global side data to the first packet */
4398  if (ist->nb_packets == 1) {
4399  for (i = 0; i < ist->st->nb_side_data; i++) {
4400  AVPacketSideData *src_sd = &ist->st->side_data[i];
4401  uint8_t *dst_data;
4402 
4403  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4404  continue;
4405 
4406  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4407  continue;
4408 
4409  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4410  if (!dst_data)
4411  exit_program(1);
4412 
4413  memcpy(dst_data, src_sd->data, src_sd->size);
4414  }
4415  }
4416 
4417  if (pkt.dts != AV_NOPTS_VALUE)
4418  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4419  if (pkt.pts != AV_NOPTS_VALUE)
4420  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4421 
4422  if (pkt.pts != AV_NOPTS_VALUE)
4423  pkt.pts *= ist->ts_scale;
4424  if (pkt.dts != AV_NOPTS_VALUE)
4425  pkt.dts *= ist->ts_scale;
4426 
4428  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4430  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4431  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4432  int64_t delta = pkt_dts - ifile->last_ts;
4433  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4434  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4435  ifile->ts_offset -= delta;
4437  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4438  delta, ifile->ts_offset);
4439  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4440  if (pkt.pts != AV_NOPTS_VALUE)
4441  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4442  }
4443  }
4444 
4445  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4446  if (pkt.pts != AV_NOPTS_VALUE) {
4447  pkt.pts += duration;
4448  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4449  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4450  }
4451 
4452  if (pkt.dts != AV_NOPTS_VALUE)
4453  pkt.dts += duration;
4454 
4456  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4458  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4459  !copy_ts) {
4460  int64_t delta = pkt_dts - ist->next_dts;
4461  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4462  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4463  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4464  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4465  ifile->ts_offset -= delta;
4467  "timestamp discontinuity for stream #%d:%d "
4468  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4469  ist->file_index, ist->st->index, ist->st->id,
4471  delta, ifile->ts_offset);
4472  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4473  if (pkt.pts != AV_NOPTS_VALUE)
4474  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4475  }
4476  } else {
4477  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4478  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4479  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4480  pkt.dts = AV_NOPTS_VALUE;
4481  }
4482  if (pkt.pts != AV_NOPTS_VALUE){
4483  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4484  delta = pkt_pts - ist->next_dts;
4485  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4486  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4487  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4488  pkt.pts = AV_NOPTS_VALUE;
4489  }
4490  }
4491  }
4492  }
4493 
4494  if (pkt.dts != AV_NOPTS_VALUE)
4495  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4496 
4497  if (debug_ts) {
4498  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4500  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4501  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4502  av_ts2str(input_files[ist->file_index]->ts_offset),
4503  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4504  }
4505 
4506  sub2video_heartbeat(ist, pkt.pts);
4507 
4508  process_input_packet(ist, &pkt, 0);
4509 
4510 discard_packet:
4511  av_packet_unref(&pkt);
4512 
4513  return 0;
4514 }
4515 
4516 /**
4517  * Perform a step of transcoding for the specified filter graph.
4518  *
4519  * @param[in] graph filter graph to consider
4520  * @param[out] best_ist input stream where a frame would allow to continue
4521  * @return 0 for success, <0 for error
4522  */
4523 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4524 {
4525  int i, ret;
4526  int nb_requests, nb_requests_max = 0;
4527  InputFilter *ifilter;
4528  InputStream *ist;
4529 
4530  *best_ist = NULL;
4531  ret = avfilter_graph_request_oldest(graph->graph);
4532  if (ret >= 0)
4533  return reap_filters(0);
4534 
4535  if (ret == AVERROR_EOF) {
4536  ret = reap_filters(1);
4537  for (i = 0; i < graph->nb_outputs; i++)
4538  close_output_stream(graph->outputs[i]->ost);
4539  return ret;
4540  }
4541  if (ret != AVERROR(EAGAIN))
4542  return ret;
4543 
4544  for (i = 0; i < graph->nb_inputs; i++) {
4545  ifilter = graph->inputs[i];
4546  ist = ifilter->ist;
4547  if (input_files[ist->file_index]->eagain ||
4548  input_files[ist->file_index]->eof_reached)
4549  continue;
4550  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4551  if (nb_requests > nb_requests_max) {
4552  nb_requests_max = nb_requests;
4553  *best_ist = ist;
4554  }
4555  }
4556 
4557  if (!*best_ist)
4558  for (i = 0; i < graph->nb_outputs; i++)
4559  graph->outputs[i]->ost->unavailable = 1;
4560 
4561  return 0;
4562 }
4563 
4564 /**