FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity strings used by cmdutils for the banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats/-vstats_file);
   checked and closed in ffmpeg_cleanup(). */
static FILE *vstats_file;

/* Variable names usable inside the -force_key_frames expression;
   NULL-terminated. Presumably matched by index with a parallel values
   array at expression-evaluation time -- TODO confirm against the
   keyframe-forcing code. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
/* Snapshot of wall-clock, user CPU and system CPU time (microseconds),
   used by -benchmark/-benchmark_all reporting.
   NOTE(review): the closing "} BenchmarkTimeStamps;" line is elided in
   this excerpt. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
128 
/* Forward declarations for helpers defined later in the file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;        /* nonzero when detached from a tty */
static int nb_frames_dup = 0;        /* frames duplicated for sync (stats) */
static unsigned dup_warning = 1000;  /* threshold for the next duplication warning */
static int nb_frames_drop = 0;       /* frames dropped for sync (stats) */
static int64_t decode_error_stat[2]; /* decode outcome counters -- presumably [ok, errored]; confirm */

static int want_sdp = 1;             /* emit an SDP description once -- presumably for RTP outputs; confirm */
144 
146 
151 
156 
159 
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* tty attributes saved by term_init() */
static int restore_tty;         /* nonzero once oldtty holds valid saved state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas (stamped with pts) into every filter
 * fed by this input stream, and record last_pts so heartbeats know how far
 * the subtitle canvas has progressed. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* NOTE(review): the flags argument(s) and closing paren of this call
           are elided in this excerpt -- confirm against upstream. */
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
/* NOTE(review): the signature line is elided in this excerpt; from the call
 * sites it is sub2video_update(InputStream *ist, AVSubtitle *sub) -- confirm.
 * Renders sub's rectangles onto a freshly blanked canvas and pushes it into
 * the filters; a NULL sub produces a blank frame valid until INT64_MAX. */
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* convert the subtitle display window from AV_TIME_BASE (ms offsets)
           into the stream time base */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* no subtitle: blank frame starting where the previous one ended */
        pts = ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the av_log(...) call head is elided in this excerpt */
        "Impossible to get a blank canvas.\n");
        return;
    }
    /* NOTE(review): dst is int8_t* here while sub2video_copy_rect takes
       uint8_t* -- confirm against upstream (likely a scrape artifact). */
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
}
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init(), if any were changed. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
327 
/* Public terminal-restore entry point; flushes logging state via a quiet
 * empty message.
 * NOTE(review): the call to term_exit_sigsafe() is elided in this excerpt. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
}
333 
/* Written from the signal handler, read from the main loop -- hence volatile.
   NOTE(review): the atomic transcode_init_done declaration (used below in
   ffmpeg_cleanup) is elided in this excerpt. */
static volatile int received_sigterm = 0;   /* last termination signal number */
static volatile int received_nb_signals = 0; /* count of termination signals */
static volatile int ffmpeg_exited = 0;       /* set once cleanup has finished */
static int main_return_code = 0;             /* process exit code on soft errors */
339 
/* Termination-signal handler: records the signal so the main loop can shut
 * down gracefully; after more than 3 signals, writes a message with
 * async-signal-safe write() and hard-exits.
 * NOTE(review): the name/parameter line (sigterm_handler(int sig)) and the
 * received_nb_signals increment are elided in this excerpt. */
static void
{
    int ret;
    received_sigterm = sig;
    if(received_nb_signals > 3) {
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
354 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * SIGINT/SIGTERM flow handled by sigterm_handler(). */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType) {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        /* interactive interrupt */
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
386 
/* Put the controlling terminal into a raw-ish mode so read_key() can grab
 * single keypresses, and install the termination signal handlers.
 * NOTE(review): the guard "if (!run_as_daemon) {" around the termios block
 * is elided in this excerpt; the unmatched closing brace below belongs
 * to it. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        oldtty = tty;
        restore_tty = 1;

        /* disable input translation/flow control, echo and canonical mode */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;   /* read() returns after one byte */
        tty.c_cc[VTIME] = 0;  /* no inter-byte timeout */

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
423 
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, 0/negative on read failure, or -1 when no
 * input is available (or on platforms with neither termios nor kbhit). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout = { 0, 0 };   /* poll: return immediately */
    fd_set readable;
    int avail;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    avail = select(1, &readable, NULL, NULL, &timeout);
    if (avail > 0) {
        avail = read(0, &ch, 1);
        return avail == 1 ? ch : avail;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
/* AVIOInterruptCB callback: tells libavformat to abort blocking I/O once
 * the process is shutting down.
 * NOTE(review): the return statement is elided in this excerpt -- upstream
 * compares received_nb_signals against the transcode-init state; confirm. */
static int decode_interrupt_cb(void *ctx)
{
}
480 
482 
/* Tear down all global state on exit: drain and free filtergraphs, close
 * output files, free output/input stream and file arrays, close the vstats
 * file, and restore the terminal.
 * NOTE(review): this excerpt elides a number of individual free/close calls
 * (graph free, sub_queue read/free, enc/dec context frees, network deinit,
 * and two av_log heads); the syntactically broken spots are marked below. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* free filtergraphs, draining any frames/subtitles still queued */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): the av_fifo_generic_read(...) call head
                       is elided in this excerpt */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
            }
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (!of)
            continue;
        /* NOTE(review): the declaration of s (AVFormatContext *) is elided
           in this excerpt */
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        /* drain packets buffered before the muxer was initialized */
        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, ...) call head
               is elided in this excerpt */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
636 
/* NOTE(review): signature elided in this excerpt -- from upstream this is
 * remove_avoptions(AVDictionary **a, AVDictionary *b): drop from *a every
 * key present in b; confirm. The loop body (av_dict_set(a, t->key, NULL, ...))
 * is also elided. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
645 
/* NOTE(review): signature and the AVDictionaryEntry *t declaration are
 * elided in this excerpt -- presumably assert_avoptions(AVDictionary *m):
 * abort if any option in m was not consumed; confirm. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
654 
/* Terminate after an experimental codec was selected without the required
 * strictness level.
 * NOTE(review): c and encoder are unused in this excerpt -- presumably the
 * diagnostic naming the codec is printed by the caller; confirm. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
659 
/* With -benchmark_all, log the user/sys/real time consumed since the last
 * call, labelled by the printf-style fmt arguments, then reset the baseline.
 * NOTE(review): the declaration/initialization of t (a BenchmarkTimeStamps
 * snapshot) and the av_log(...) call head are elided in this excerpt. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.user_usec - current_time.user_usec,
            t.sys_usec - current_time.sys_usec,
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
680 
/* NOTE(review): signature elided in this excerpt -- presumably
 * close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
 * OSTFinished others): mark ost with this_stream and every other output
 * stream with others; confirm. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
689 
/* Hand one packet to the muxer for stream ost: enforce -frames limits,
 * queue the packet if the muxer header is not yet written, fix up
 * CFR durations and non-monotonous/invalid timestamps, then interleave-write.
 * NOTE(review): several lines are elided in this excerpt (the condition
 * guarding the timestamp-stripping assignment, the quality side-data lookup,
 * the head of the monotonicity check, an av_log head and the error-path
 * stream closing); the broken spots are marked below. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): av_log(..., AV_LOG_ERROR, ...) head elided */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* NOTE(review): the condition guarding this assignment (timestamp
       stripping for certain sync modes) is elided in this excerpt */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the side-data lookup call head (declaring sd) is
           elided in this excerpt */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* replace both by the middle value of {pts, dts, last_dts+1} */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the first line of this condition (the if ( ... with
           the codec-type test) is elided in this excerpt */
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type string argument line is elided
                  in this excerpt */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): the close_all_output_streams(...) call on this error
           path is elided in this excerpt */
    }
    av_packet_unref(pkt);
}
824 
/* NOTE(review): signature elided in this excerpt -- presumably
 * close_output_stream(OutputStream *ost): mark the encoder finished and,
 * with -shortest, clamp the file's recording time to this stream's end. */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        /* convert the stream's current position to AV_TIME_BASE units */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
835 
836 /*
837  * Send a single packet to the output, applying any bitstream filters
838  * associated with the output stream. This may result in any number
839  * of packets actually being written, depending on what bitstream
840  * filters are applied. The supplied packet is consumed and will be
841  * blank (as if newly-allocated) when this function returns.
842  *
843  * If eof is set, instead indicate EOF to all bitstream filters and
844  * therefore flush any delayed packets to the output. A blank packet
845  * must be supplied in this case.
846  */
/* NOTE(review): the first signature line is elided in this excerpt --
 * presumably: static void output_packet(OutputFile *of, AVPacket *pkt, */
                         OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        /* feed the packet (or EOF) into the first filter of the chain */
        ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
        if (ret < 0)
            goto finish;

        eof = 0;
        idx = 1;
        /* idx tracks the filter currently being drained; 0 means done */
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                /* this filter needs more input: step back up the chain */
                ret = 0;
                idx--;
                continue;
            } else if (ret == AVERROR_EOF) {
                eof = 1;
            } else if (ret < 0)
                goto finish;

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
                if (ret < 0)
                    goto finish;
                idx++;
                eof = 0;
            } else if (eof)
                goto finish;
            else
                write_packet(of, pkt, ost, 0);
        }
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
897 
/* NOTE(review): signature and part of the av_compare_ts() call are elided in
 * this excerpt -- presumably check_recording_time(OutputStream *ost):
 * returns 0 (and closes the stream) once the -t recording time is reached,
 * 1 otherwise; confirm. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;
    }
    return 1;
}
910 
/* NOTE(review): the first signature line is elided in this excerpt --
 * presumably: static void do_audio_out(OutputFile *of, OutputStream *ost, */
                        AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* stop once the stream's -t recording window is exhausted */
    if (!check_recording_time(ost))
        return;

    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    /* next expected pts: current pts plus this frame's sample count */
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* drain every packet the encoder has ready */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
970 
971 static void do_subtitle_out(OutputFile *of,
972  OutputStream *ost,
973  AVSubtitle *sub)
974 {
975  int subtitle_out_max_size = 1024 * 1024;
976  int subtitle_out_size, nb, i;
977  AVCodecContext *enc;
978  AVPacket pkt;
979  int64_t pts;
980 
981  if (sub->pts == AV_NOPTS_VALUE) {
982  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
983  if (exit_on_error)
984  exit_program(1);
985  return;
986  }
987 
988  enc = ost->enc_ctx;
989 
990  if (!subtitle_out) {
991  subtitle_out = av_malloc(subtitle_out_max_size);
992  if (!subtitle_out) {
993  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
994  exit_program(1);
995  }
996  }
997 
998  /* Note: DVB subtitle need one packet to draw them and one other
999  packet to clear them */
1000  /* XXX: signal it in the codec context ? */
1001  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1002  nb = 2;
1003  else
1004  nb = 1;
1005 
1006  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007  pts = sub->pts;
1008  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009  pts -= output_files[ost->file_index]->start_time;
1010  for (i = 0; i < nb; i++) {
1011  unsigned save_num_rects = sub->num_rects;
1012 
1013  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014  if (!check_recording_time(ost))
1015  return;
1016 
1017  sub->pts = pts;
1018  // start_display_time is required to be 0
1019  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020  sub->end_display_time -= sub->start_display_time;
1021  sub->start_display_time = 0;
1022  if (i == 1)
1023  sub->num_rects = 0;
1024 
1025  ost->frames_encoded++;
1026 
1027  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028  subtitle_out_max_size, sub);
1029  if (i == 1)
1030  sub->num_rects = save_num_rects;
1031  if (subtitle_out_size < 0) {
1032  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1033  exit_program(1);
1034  }
1035 
1036  av_init_packet(&pkt);
1037  pkt.data = subtitle_out;
1038  pkt.size = subtitle_out_size;
1039  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042  /* XXX: the pts correction is handled here. Maybe handling
1043  it in the codec would be better */
1044  if (i == 0)
1045  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046  else
1047  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1048  }
1049  pkt.dts = pkt.pts;
1050  output_packet(of, &pkt, ost, 0);
1051  }
1052 }
1053 
1054 static void do_video_out(OutputFile *of,
1055  OutputStream *ost,
1056  AVFrame *next_picture,
1057  double sync_ipts)
1058 {
1059  int ret, format_video_sync;
1060  AVPacket pkt;
1061  AVCodecContext *enc = ost->enc_ctx;
1062  AVCodecParameters *mux_par = ost->st->codecpar;
1063  AVRational frame_rate;
1064  int nb_frames, nb0_frames, i;
1065  double delta, delta0;
1066  double duration = 0;
1067  int frame_size = 0;
1068  InputStream *ist = NULL;
1070 
1071  if (ost->source_index >= 0)
1072  ist = input_streams[ost->source_index];
1073 
1074  frame_rate = av_buffersink_get_frame_rate(filter);
1075  if (frame_rate.num > 0 && frame_rate.den > 0)
1076  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 
1078  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 
1081  if (!ost->filters_script &&
1082  !ost->filters &&
1083  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1084  next_picture &&
1085  ist &&
1086  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088  }
1089 
1090  if (!next_picture) {
1091  //end, flushing
1092  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093  ost->last_nb0_frames[1],
1094  ost->last_nb0_frames[2]);
1095  } else {
1096  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097  delta = delta0 + duration;
1098 
1099  /* by default, we output a single frame */
1100  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101  nb_frames = 1;
1102 
1103  format_video_sync = video_sync_method;
1104  if (format_video_sync == VSYNC_AUTO) {
1105  if(!strcmp(of->ctx->oformat->name, "avi")) {
1106  format_video_sync = VSYNC_VFR;
1107  } else
1108  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1109  if ( ist
1110  && format_video_sync == VSYNC_CFR
1111  && input_files[ist->file_index]->ctx->nb_streams == 1
1112  && input_files[ist->file_index]->input_ts_offset == 0) {
1113  format_video_sync = VSYNC_VSCFR;
1114  }
1115  if (format_video_sync == VSYNC_CFR && copy_ts) {
1116  format_video_sync = VSYNC_VSCFR;
1117  }
1118  }
1119  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1120 
1121  if (delta0 < 0 &&
1122  delta > 0 &&
1123  format_video_sync != VSYNC_PASSTHROUGH &&
1124  format_video_sync != VSYNC_DROP) {
1125  if (delta0 < -0.6) {
1126  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1127  } else
1128  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129  sync_ipts = ost->sync_opts;
1130  duration += delta0;
1131  delta0 = 0;
1132  }
1133 
1134  switch (format_video_sync) {
1135  case VSYNC_VSCFR:
1136  if (ost->frame_number == 0 && delta0 >= 0.5) {
1137  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138  delta = duration;
1139  delta0 = 0;
1140  ost->sync_opts = lrint(sync_ipts);
1141  }
1142  case VSYNC_CFR:
1143  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1145  nb_frames = 0;
1146  } else if (delta < -1.1)
1147  nb_frames = 0;
1148  else if (delta > 1.1) {
1149  nb_frames = lrintf(delta);
1150  if (delta0 > 1.1)
1151  nb0_frames = lrintf(delta0 - 0.6);
1152  }
1153  break;
1154  case VSYNC_VFR:
1155  if (delta <= -0.6)
1156  nb_frames = 0;
1157  else if (delta > 0.6)
1158  ost->sync_opts = lrint(sync_ipts);
1159  break;
1160  case VSYNC_DROP:
1161  case VSYNC_PASSTHROUGH:
1162  ost->sync_opts = lrint(sync_ipts);
1163  break;
1164  default:
1165  av_assert0(0);
1166  }
1167  }
1168 
1169  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170  nb0_frames = FFMIN(nb0_frames, nb_frames);
1171 
1172  memmove(ost->last_nb0_frames + 1,
1173  ost->last_nb0_frames,
1174  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175  ost->last_nb0_frames[0] = nb0_frames;
1176 
1177  if (nb0_frames == 0 && ost->last_dropped) {
1178  nb_frames_drop++;
1180  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181  ost->frame_number, ost->st->index, ost->last_frame->pts);
1182  }
1183  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184  if (nb_frames > dts_error_threshold * 30) {
1185  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1186  nb_frames_drop++;
1187  return;
1188  }
1189  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191  if (nb_frames_dup > dup_warning) {
1192  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1193  dup_warning *= 10;
1194  }
1195  }
1196  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1197 
1198  /* duplicates frame if needed */
1199  for (i = 0; i < nb_frames; i++) {
1200  AVFrame *in_picture;
1201  int forced_keyframe = 0;
1202  double pts_time;
1203  av_init_packet(&pkt);
1204  pkt.data = NULL;
1205  pkt.size = 0;
1206 
1207  if (i < nb0_frames && ost->last_frame) {
1208  in_picture = ost->last_frame;
1209  } else
1210  in_picture = next_picture;
1211 
1212  if (!in_picture)
1213  return;
1214 
1215  in_picture->pts = ost->sync_opts;
1216 
1217  if (!check_recording_time(ost))
1218  return;
1219 
1221  ost->top_field_first >= 0)
1222  in_picture->top_field_first = !!ost->top_field_first;
1223 
1224  if (in_picture->interlaced_frame) {
1225  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227  else
1228  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229  } else
1230  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 
1232  in_picture->quality = enc->global_quality;
1233  in_picture->pict_type = 0;
1234 
1235  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236  in_picture->pts != AV_NOPTS_VALUE)
1237  ost->forced_kf_ref_pts = in_picture->pts;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1264  }
1265 
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1) {
1270  forced_keyframe = 1;
1271  }
1272 
1273  if (forced_keyframe) {
1274  in_picture->pict_type = AV_PICTURE_TYPE_I;
1275  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276  }
1277 
1279  if (debug_ts) {
1280  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283  enc->time_base.num, enc->time_base.den);
1284  }
1285 
1286  ost->frames_encoded++;
1287 
1288  ret = avcodec_send_frame(enc, in_picture);
1289  if (ret < 0)
1290  goto error;
1291  // Make sure Closed Captions will not be duplicated
1293 
1294  while (1) {
1295  ret = avcodec_receive_packet(enc, &pkt);
1296  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1297  if (ret == AVERROR(EAGAIN))
1298  break;
1299  if (ret < 0)
1300  goto error;
1301 
1302  if (debug_ts) {
1303  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1305  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1306  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1307  }
1308 
1309  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1310  pkt.pts = ost->sync_opts;
1311 
1312  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 
1314  if (debug_ts) {
1315  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1316  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1317  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1318  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319  }
1320 
1321  frame_size = pkt.size;
1322  output_packet(of, &pkt, ost, 0);
1323 
1324  /* if two pass, output log */
1325  if (ost->logfile && enc->stats_out) {
1326  fprintf(ost->logfile, "%s", enc->stats_out);
1327  }
1328  }
1329  ost->sync_opts++;
1330  /*
1331  * For video, number of frames in == number of packets out.
1332  * But there may be reordering, so we can't throw away frames on encoder
1333  * flush, we need to limit them here, before they go into encoder.
1334  */
1335  ost->frame_number++;
1336 
1337  if (vstats_filename && frame_size)
1338  do_video_stats(ost, frame_size);
1339  }
1340 
1341  if (!ost->last_frame)
1342  ost->last_frame = av_frame_alloc();
1343  av_frame_unref(ost->last_frame);
1344  if (next_picture && ost->last_frame)
1345  av_frame_ref(ost->last_frame, next_picture);
1346  else
1347  av_frame_free(&ost->last_frame);
1348 
1349  return;
1350 error:
1351  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1352  exit_program(1);
1353 }
1354 
1355 static double psnr(double d)
1356 {
1357  return -10.0 * log10(d);
1358 }
1359 
/* Append one line of per-frame video statistics (-vstats / -vstats_file):
 * frame number, quantizer, optional PSNR, frame size, and bitrates.
 * NOTE(review): the function signature line is missing from this extraction;
 * presumably do_video_stats(OutputStream *ost, int frame_size) — confirm upstream. */
1361 {
1362  AVCodecContext *enc;
1363  int frame_number;
1364  double ti1, bitrate, avg_bitrate;
1365 
1366  /* this is executed just the first time do_video_stats is called */
1367  if (!vstats_file) {
1368  vstats_file = fopen(vstats_filename, "w");
1369  if (!vstats_file) {
1370  perror("fopen");
1371  exit_program(1);
1372  }
1373  }
1374 
1375  enc = ost->enc_ctx;
1376  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377  frame_number = ost->st->nb_frames;
 /* vstats_version selects between the legacy and the extended line format */
1378  if (vstats_version <= 1) {
1379  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380  ost->quality / (float)FF_QP2LAMBDA);
1381  } else {
1382  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383  ost->quality / (float)FF_QP2LAMBDA);
1384  }
1385 
 /* PSNR is only printed when the encoder collected error[] (PSNR flag set) */
1386  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 
1389  fprintf(vstats_file,"f_size= %6d ", frame_size);
1390  /* compute pts value; clamp to 0.01s to avoid dividing by ~zero below */
1391  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1392  if (ti1 < 0.01)
1393  ti1 = 0.01;
1394 
 /* instantaneous bitrate from this frame's size and the encoder tick;
  * average bitrate from total bytes written over the elapsed pts time */
1395  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1400  }
1401 }
1402 
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 
/* Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature line and (apparently) the statement that marks
 * 'ost' itself finished (inner line 1410) were dropped by this extraction —
 * verify against upstream before relying on this view. */
1406 {
1407  OutputFile *of = output_files[ost->file_index];
1408  int i;
1409 
1411 
1412  if (of->shortest) {
 /* -shortest: once one stream ends, stop encoding and muxing all of them */
1413  for (i = 0; i < of->ctx->nb_streams; i++)
1414  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1415  }
1416 }
1417 
1418 /**
1419  * Get and encode new output from any of the filtergraphs, without causing
1420  * activity.
1421  *
1422  * @return 0 for success, <0 for severe errors
1423  */
/* Drain every configured buffersink and hand the filtered frames to the
 * audio/video encoders. With flush!=0, EOF from a video sink triggers a
 * final do_video_out(NULL) to flush the fps conversion state.
 * NOTE(review): several inner lines were dropped by this extraction (the
 * 'filter' variable declaration at 1433, the flags argument of
 * av_buffersink_get_frame_flags at 1459, and av_log opener lines 1462/1510)
 * — consult upstream for the full text. */
1424 static int reap_filters(int flush)
1425 {
1426  AVFrame *filtered_frame = NULL;
1427  int i;
1428 
1429  /* Reap all buffers present in the buffer sinks */
1430  for (i = 0; i < nb_output_streams; i++) {
1431  OutputStream *ost = output_streams[i];
1432  OutputFile *of = output_files[ost->file_index];
1434  AVCodecContext *enc = ost->enc_ctx;
1435  int ret = 0;
1436 
 /* skip streams whose filtergraph is not set up yet */
1437  if (!ost->filter || !ost->filter->graph->graph)
1438  continue;
1439  filter = ost->filter->filter;
1440 
 /* lazily initialize the output stream on first reaped frame */
1441  if (!ost->initialized) {
1442  char error[1024] = "";
1443  ret = init_output_stream(ost, error, sizeof(error));
1444  if (ret < 0) {
1445  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446  ost->file_index, ost->index, error);
1447  exit_program(1);
1448  }
1449  }
1450 
1451  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452  return AVERROR(ENOMEM);
1453  }
1454  filtered_frame = ost->filtered_frame;
1455 
1456  while (1) {
1457  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1458  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1460  if (ret < 0) {
1461  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1463  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464  } else if (flush && ret == AVERROR_EOF) {
1466  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1467  }
1468  break;
1469  }
1470  if (ost->finished) {
1471  av_frame_unref(filtered_frame);
1472  continue;
1473  }
 /* rescale the frame pts from filter to encoder timebase, keeping a
  * higher-precision float copy (extra_bits of sub-tick resolution) */
1474  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1476  AVRational filter_tb = av_buffersink_get_time_base(filter);
1477  AVRational tb = enc->time_base;
1478  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 
1480  tb.den <<= extra_bits;
1481  float_pts =
1482  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1483  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1484  float_pts /= 1 << extra_bits;
1485  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 
1488  filtered_frame->pts =
1489  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1490  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1491  }
1492 
1493  switch (av_buffersink_get_type(filter)) {
1494  case AVMEDIA_TYPE_VIDEO:
1495  if (!ost->frame_aspect_ratio.num)
1496  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497 
1498  if (debug_ts) {
1499  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501  float_pts,
1502  enc->time_base.num, enc->time_base.den);
1503  }
1504 
1505  do_video_out(of, ost, filtered_frame, float_pts);
1506  break;
1507  case AVMEDIA_TYPE_AUDIO:
1508  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509  enc->channels != filtered_frame->channels) {
1511  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512  break;
1513  }
1514  do_audio_out(of, ost, filtered_frame);
1515  break;
1516  default:
1517  // TODO support subtitle filters
1518  av_assert0(0);
1519  }
1520 
1521  av_frame_unref(filtered_frame);
1522  }
1523  }
1524 
1525  return 0;
1526 }
1527 
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * verbose per-stream demux/mux/encode/decode statistics.
 * NOTE(review): inner lines 1547-1548 (the condition guarding 'pass1_used = 0',
 * presumably a check of the two-pass encoder flags) were dropped by this
 * extraction — verify against upstream. */
1528 static void print_final_stats(int64_t total_size)
1529 {
1530  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531  uint64_t subtitle_size = 0;
1532  uint64_t data_size = 0;
1533  float percent = -1.0;
1534  int i, j;
1535  int pass1_used = 1;
1536 
 /* accumulate output byte counts per media type */
1537  for (i = 0; i < nb_output_streams; i++) {
1538  OutputStream *ost = output_streams[i];
1539  switch (ost->enc_ctx->codec_type) {
1540  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543  default: other_size += ost->data_size; break;
1544  }
1545  extra_size += ost->enc_ctx->extradata_size;
1546  data_size += ost->data_size;
1549  pass1_used = 0;
1550  }
1551 
 /* muxing overhead as a percentage of the raw stream data */
1552  if (data_size && total_size>0 && total_size >= data_size)
1553  percent = 100.0 * (total_size - data_size) / data_size;
1554 
1555  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556  video_size / 1024.0,
1557  audio_size / 1024.0,
1558  subtitle_size / 1024.0,
1559  other_size / 1024.0,
1560  extra_size / 1024.0);
1561  if (percent >= 0.0)
1562  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563  else
1564  av_log(NULL, AV_LOG_INFO, "unknown");
1565  av_log(NULL, AV_LOG_INFO, "\n");
1566 
1567  /* print verbose per-stream stats */
1568  for (i = 0; i < nb_input_files; i++) {
1569  InputFile *f = input_files[i];
1570  uint64_t total_packets = 0, total_size = 0;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573  i, f->ctx->url);
1574 
1575  for (j = 0; j < f->nb_streams; j++) {
1576  InputStream *ist = input_streams[f->ist_index + j];
1577  enum AVMediaType type = ist->dec_ctx->codec_type;
1578 
1579  total_size += ist->data_size;
1580  total_packets += ist->nb_packets;
1581 
1582  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583  i, j, media_type_string(type));
1584  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585  ist->nb_packets, ist->data_size);
1586 
1587  if (ist->decoding_needed) {
1588  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589  ist->frames_decoded);
1590  if (type == AVMEDIA_TYPE_AUDIO)
1591  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592  av_log(NULL, AV_LOG_VERBOSE, "; ");
1593  }
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "\n");
1596  }
1597 
1598  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599  total_packets, total_size);
1600  }
1601 
1602  for (i = 0; i < nb_output_files; i++) {
1603  OutputFile *of = output_files[i];
1604  uint64_t total_packets = 0, total_size = 0;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607  i, of->ctx->url);
1608 
1609  for (j = 0; j < of->ctx->nb_streams; j++) {
1610  OutputStream *ost = output_streams[of->ost_index + j];
1611  enum AVMediaType type = ost->enc_ctx->codec_type;
1612 
1613  total_size += ost->data_size;
1614  total_packets += ost->packets_written;
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617  i, j, media_type_string(type));
1618  if (ost->encoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620  ost->frames_encoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627  ost->packets_written, ost->data_size);
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "\n");
1630  }
1631 
1632  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633  total_packets, total_size);
1634  }
 /* warn if nothing at all was produced (unless pass 1, where that's expected) */
1635  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637  if (pass1_used) {
1638  av_log(NULL, AV_LOG_WARNING, "\n");
1639  } else {
1640  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1641  }
1642  }
1643 }
1644 
/* Emit the periodic (every ~500ms) or final status line: frame count, fps,
 * quantizer, size, time, bitrate, dup/drop counts and speed; also write the
 * machine-readable key=value report to -progress when enabled.
 * NOTE(review): several inner lines were dropped by this extraction
 * (av_bprint_init of 'buf' at 1686, the AV_NOPTS_VALUE guard at 1754, the
 * dup/drop condition at 1802, and an av_log opener at 1835) — verify against
 * upstream. */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 {
1647  AVBPrint buf, buf_script;
1648  OutputStream *ost;
1649  AVFormatContext *oc;
1650  int64_t total_size;
1651  AVCodecContext *enc;
1652  int frame_number, vid, i;
1653  double bitrate;
1654  double speed;
1655  int64_t pts = INT64_MIN + 1;
1656  static int64_t last_time = -1;
1657  static int qp_histogram[52];
1658  int hours, mins, secs, us;
1659  const char *hours_sign;
1660  int ret;
1661  float t;
1662 
1663  if (!print_stats && !is_last_report && !progress_avio)
1664  return;
1665 
 /* rate-limit intermediate reports to one per 500ms of wall clock */
1666  if (!is_last_report) {
1667  if (last_time == -1) {
1668  last_time = cur_time;
1669  return;
1670  }
1671  if ((cur_time - last_time) < 500000)
1672  return;
1673  last_time = cur_time;
1674  }
1675 
1676  t = (cur_time-timer_start) / 1000000.0;
1677 
1678 
1679  oc = output_files[0]->ctx;
1680 
1681  total_size = avio_size(oc->pb);
1682  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683  total_size = avio_tell(oc->pb);
1684 
1685  vid = 0;
1687  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1688  for (i = 0; i < nb_output_streams; i++) {
1689  float q = -1;
1690  ost = output_streams[i];
1691  enc = ost->enc_ctx;
1692  if (!ost->stream_copy)
1693  q = ost->quality / (float) FF_QP2LAMBDA;
1694 
 /* secondary video streams only contribute their quantizer */
1695  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696  av_bprintf(&buf, "q=%2.1f ", q);
1697  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698  ost->file_index, ost->index, q);
1699  }
 /* the first video stream drives frame/fps/PSNR reporting */
1700  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701  float fps;
1702 
1703  frame_number = ost->frame_number;
1704  fps = t > 1 ? frame_number / t : 0;
1705  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1706  frame_number, fps < 9.95, fps, q);
1707  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1709  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710  ost->file_index, ost->index, q);
1711  if (is_last_report)
1712  av_bprintf(&buf, "L");
1713  if (qp_hist) {
1714  int j;
1715  int qp = lrintf(q);
1716  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717  qp_histogram[qp]++;
1718  for (j = 0; j < 32; j++)
1719  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1720  }
1721 
1722  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723  int j;
1724  double error, error_sum = 0;
1725  double scale, scale_sum = 0;
1726  double p;
1727  char type[3] = { 'Y','U','V' };
1728  av_bprintf(&buf, "PSNR=");
1729  for (j = 0; j < 3; j++) {
1730  if (is_last_report) {
1731  error = enc->error[j];
1732  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733  } else {
1734  error = ost->error[j];
1735  scale = enc->width * enc->height * 255.0 * 255.0;
1736  }
 /* chroma planes are subsampled: a quarter of the luma area */
1737  if (j)
1738  scale /= 4;
1739  error_sum += error;
1740  scale_sum += scale;
1741  p = psnr(error / scale);
1742  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1743  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744  ost->file_index, ost->index, type[j] | 32, p);
1745  }
1746  p = psnr(error_sum / scale_sum);
1747  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1748  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749  ost->file_index, ost->index, p);
1750  }
1751  vid = 1;
1752  }
1753  /* compute min output value */
1755  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1756  ost->st->time_base, AV_TIME_BASE_Q));
1757  if (is_last_report)
1758  nb_frames_drop += ost->last_dropped;
1759  }
1760 
 /* split the output timestamp into sign, h:m:s and microseconds */
1761  secs = FFABS(pts) / AV_TIME_BASE;
1762  us = FFABS(pts) % AV_TIME_BASE;
1763  mins = secs / 60;
1764  secs %= 60;
1765  hours = mins / 60;
1766  mins %= 60;
1767  hours_sign = (pts < 0) ? "-" : "";
1768 
1769  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 
1772  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1773  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1774  if (pts == AV_NOPTS_VALUE) {
1775  av_bprintf(&buf, "N/A ");
1776  } else {
1777  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1778  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1779  }
1780 
1781  if (bitrate < 0) {
1782  av_bprintf(&buf, "bitrate=N/A");
1783  av_bprintf(&buf_script, "bitrate=N/A\n");
1784  }else{
1785  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1786  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1787  }
1788 
1789  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791  if (pts == AV_NOPTS_VALUE) {
1792  av_bprintf(&buf_script, "out_time_us=N/A\n");
1793  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1794  av_bprintf(&buf_script, "out_time=N/A\n");
1795  } else {
1796  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1797  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1798  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1799  hours_sign, hours, mins, secs, us);
1800  }
1801 
1803  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1804  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1805  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1806 
1807  if (speed < 0) {
1808  av_bprintf(&buf, " speed=N/A");
1809  av_bprintf(&buf_script, "speed=N/A\n");
1810  } else {
1811  av_bprintf(&buf, " speed=%4.3gx", speed);
1812  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1813  }
1814 
 /* intermediate reports end with '\r' so the next one overwrites the line */
1815  if (print_stats || is_last_report) {
1816  const char end = is_last_report ? '\n' : '\r';
1817  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1818  fprintf(stderr, "%s %c", buf.str, end);
1819  } else
1820  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1821 
1822  fflush(stderr);
1823  }
1824  av_bprint_finalize(&buf, NULL);
1825 
1826  if (progress_avio) {
1827  av_bprintf(&buf_script, "progress=%s\n",
1828  is_last_report ? "end" : "continue");
1829  avio_write(progress_avio, buf_script.str,
1830  FFMIN(buf_script.len, buf_script.size - 1));
1831  avio_flush(progress_avio);
1832  av_bprint_finalize(&buf_script, NULL);
1833  if (is_last_report) {
1834  if ((ret = avio_closep(&progress_avio)) < 0)
1836  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1837  }
1838  }
1839 
1840  if (is_last_report)
1841  print_final_stats(total_size);
1842 }
1843 
/* Seed an InputFilter's media parameters from stream-level codec parameters,
 * used when no decoded frame ever arrived to provide them.
 * NOTE(review): the signature line is missing from this extraction;
 * presumably ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par) — confirm upstream. */
1845 {
1846  // We never got any input. Set a fake format, which will
1847  // come from libavformat.
1848  ifilter->format = par->format;
1849  ifilter->sample_rate = par->sample_rate;
1850  ifilter->channels = par->channels;
1851  ifilter->channel_layout = par->channel_layout;
1852  ifilter->width = par->width;
1853  ifilter->height = par->height;
1854  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1855 }
1856 
/* Drain all encoders at end of input: send a NULL frame to each active
 * encoder and mux the remaining packets until AVERROR_EOF. Streams that
 * never received data are first initialized so they can be finished cleanly.
 * NOTE(review): several inner lines were dropped by this extraction (an
 * av_log opener at 1875, the condition at 1887 guarding 'continue', the
 * media-type guard at 1910, a statement at 1933, and the vstats condition at
 * 1966) — verify against upstream. */
1857 static void flush_encoders(void)
1858 {
1859  int i, ret;
1860 
1861  for (i = 0; i < nb_output_streams; i++) {
1862  OutputStream *ost = output_streams[i];
1863  AVCodecContext *enc = ost->enc_ctx;
1864  OutputFile *of = output_files[ost->file_index];
1865 
1866  if (!ost->encoding_needed)
1867  continue;
1868 
1869  // Try to enable encoding with no input frames.
1870  // Maybe we should just let encoding fail instead.
1871  if (!ost->initialized) {
1872  FilterGraph *fg = ost->filter->graph;
1873  char error[1024] = "";
1874 
1876  "Finishing stream %d:%d without any data written to it.\n",
1877  ost->file_index, ost->st->index);
1878 
 /* configure a never-built filtergraph using stream parameters as
  * a substitute for the frame parameters that never arrived */
1879  if (ost->filter && !fg->graph) {
1880  int x;
1881  for (x = 0; x < fg->nb_inputs; x++) {
1882  InputFilter *ifilter = fg->inputs[x];
1883  if (ifilter->format < 0)
1884  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1885  }
1886 
1888  continue;
1889 
1890  ret = configure_filtergraph(fg);
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1893  exit_program(1);
1894  }
1895 
1896  finish_output_stream(ost);
1897  }
1898 
1899  ret = init_output_stream(ost, error, sizeof(error));
1900  if (ret < 0) {
1901  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1902  ost->file_index, ost->index, error);
1903  exit_program(1);
1904  }
1905  }
1906 
 /* PCM-like audio encoders (frame_size <= 1) have nothing buffered */
1907  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1908  continue;
1909 
1911  continue;
1912 
1913  for (;;) {
1914  const char *desc = NULL;
1915  AVPacket pkt;
1916  int pkt_size;
1917 
1918  switch (enc->codec_type) {
1919  case AVMEDIA_TYPE_AUDIO:
1920  desc = "audio";
1921  break;
1922  case AVMEDIA_TYPE_VIDEO:
1923  desc = "video";
1924  break;
1925  default:
1926  av_assert0(0);
1927  }
1928 
1929  av_init_packet(&pkt);
1930  pkt.data = NULL;
1931  pkt.size = 0;
1932 
1934 
 /* keep flushing (NULL frame) until the encoder yields a packet or EOF */
1935  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1936  ret = avcodec_send_frame(enc, NULL);
1937  if (ret < 0) {
1938  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1939  desc,
1940  av_err2str(ret));
1941  exit_program(1);
1942  }
1943  }
1944 
1945  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1946  if (ret < 0 && ret != AVERROR_EOF) {
1947  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1948  desc,
1949  av_err2str(ret));
1950  exit_program(1);
1951  }
1952  if (ost->logfile && enc->stats_out) {
1953  fprintf(ost->logfile, "%s", enc->stats_out);
1954  }
 /* EOF: emit the final (flush) packet to the muxer and stop */
1955  if (ret == AVERROR_EOF) {
1956  output_packet(of, &pkt, ost, 1);
1957  break;
1958  }
1959  if (ost->finished & MUXER_FINISHED) {
1960  av_packet_unref(&pkt);
1961  continue;
1962  }
1963  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1964  pkt_size = pkt.size;
1965  output_packet(of, &pkt, ost, 0);
1967  do_video_stats(ost, pkt_size);
1968  }
1969  }
1970  }
1971 }
1972 
1973 /*
1974  * Check whether a packet from ist should be written into ost at this time
1975  */
/* Return 1 if a packet from 'ist' should be written into 'ost' now:
 * the stream must be this output's source, not finished, and past the
 * output file's start time.
 * NOTE(review): the signature line is missing from this extraction;
 * presumably check_output_constraints(InputStream *ist, OutputStream *ost)
 * — confirm upstream. */
1977 {
1978  OutputFile *of = output_files[ost->file_index];
1979  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1980 
1981  if (ost->source_index != ist_index)
1982  return 0;
1983 
1984  if (ost->finished)
1985  return 0;
1986 
1987  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1988  return 0;
1989 
1990  return 1;
1991 }
1992 
/* Copy one input packet to the output without re-encoding (-c copy):
 * apply start-time/recording-time windowing, rescale timestamps into the
 * muxer timebase, and forward the packet (NULL pkt flushes the bsf chain).
 * NOTE(review): inner lines 2011 (second half of the initial keyframe
 * condition) and 2057 (the 'duration' initialization before the audio
 * rescale) were dropped by this extraction — verify against upstream. */
1993 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1994 {
1995  OutputFile *of = output_files[ost->file_index];
1996  InputFile *f = input_files [ist->file_index];
1997  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1998  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1999  AVPacket opkt;
2000 
2001  // EOF: flush output bitstream filters.
2002  if (!pkt) {
2003  av_init_packet(&opkt);
2004  opkt.data = NULL;
2005  opkt.size = 0;
2006  output_packet(of, &opkt, ost, 1);
2007  return;
2008  }
2009 
 /* drop leading non-keyframe packets (condition continues on dropped line 2011) */
2010  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2012  return;
2013 
 /* drop packets before the effective start time unless -copypriorss */
2014  if (!ost->frame_number && !ost->copy_prior_start) {
2015  int64_t comp_start = start_time;
2016  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2017  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2018  if (pkt->pts == AV_NOPTS_VALUE ?
2019  ist->pts < comp_start :
2020  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2021  return;
2022  }
2023 
 /* output -t reached: close the stream */
2024  if (of->recording_time != INT64_MAX &&
2025  ist->pts >= of->recording_time + start_time) {
2026  close_output_stream(ost);
2027  return;
2028  }
2029 
2030  if (f->recording_time != INT64_MAX) {
2031  start_time = f->ctx->start_time;
2032  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2033  start_time += f->start_time;
2034  if (ist->pts >= f->recording_time + start_time) {
2035  close_output_stream(ost);
2036  return;
2037  }
2038  }
2039 
2040  /* force the input stream PTS */
2041  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2042  ost->sync_opts++;
2043 
2044  if (av_packet_ref(&opkt, pkt) < 0)
2045  exit_program(1);
2046 
 /* shift timestamps by the output start time, in the mux timebase */
2047  if (pkt->pts != AV_NOPTS_VALUE)
2048  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2049 
2050  if (pkt->dts == AV_NOPTS_VALUE)
2051  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2052  else
2053  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2054  opkt.dts -= ost_tb_start_time;
2055 
 /* audio: recompute pts/dts sample-accurately via av_rescale_delta */
2056  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2058  if(!duration)
2059  duration = ist->dec_ctx->frame_size;
2060  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2061  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2062  ost->mux_timebase) - ost_tb_start_time;
2063  }
2064 
2065  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2066 
2067  output_packet(of, &opkt, ost, 0);
2068 }
2069 
/* If the decoder reports no channel layout, try to pick a default one for
 * its channel count (up to -guess_layout_max channels) and warn about the
 * guess. Returns 1 on success (or if a layout was already set), 0 otherwise.
 * NOTE(review): the signature line (2070) and the line performing the actual
 * guess (2079, presumably assigning a default layout for dec->channels) were
 * dropped by this extraction — verify against upstream. */
2071 {
2072  AVCodecContext *dec = ist->dec_ctx;
2073 
2074  if (!dec->channel_layout) {
2075  char layout_name[256];
2076 
 /* refuse to guess for more channels than the user allows */
2077  if (dec->channels > ist->guess_layout_max)
2078  return 0;
2080  if (!dec->channel_layout)
2081  return 0;
2082  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2083  dec->channels, dec->channel_layout);
2084  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2085  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2086  }
2087  return 1;
2088 }
2089 
/* Track decode success/failure statistics and abort on error or corrupt
 * frames when -xerror is set.
 * NOTE(review): inner lines 2099-2100 (the condition detecting a corrupt
 * decoded frame and the av_log opener) were dropped by this extraction —
 * verify against upstream. */
2090 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2091 {
 /* decode_error_stat[0] counts successes, [1] counts failures */
2092  if (*got_output || ret<0)
2093  decode_error_stat[ret<0] ++;
2094 
2095  if (ret < 0 && exit_on_error)
2096  exit_program(1);
2097 
2098  if (*got_output && ist) {
2101  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2102  if (exit_on_error)
2103  exit_program(1);
2104  }
2105  }
2106 }
2107 
2108 // Filters can be configured only if the formats of all inputs are known.
2110 {
2111  int i;
2112  for (i = 0; i < fg->nb_inputs; i++) {
2113  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2114  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2115  return 0;
2116  }
2117  return 1;
2118 }
2119 
/* Feed a decoded frame into an input filter, (re)configuring the filtergraph
 * first if the frame's parameters changed or the graph was never built.
 * Frames arriving before all graph inputs have known formats are buffered in
 * the ifilter's frame queue.
 * NOTE(review): the signature line (2120, presumably
 * ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)) and line 2187
 * (the call that actually pushes 'frame' into the buffer source) were dropped
 * by this extraction — verify against upstream. */
2121 {
2122  FilterGraph *fg = ifilter->graph;
2123  int need_reinit, ret, i;
2124 
2125  /* determine if the parameters for this input changed */
2126  need_reinit = ifilter->format != frame->format;
2127 
2128  switch (ifilter->ist->st->codecpar->codec_type) {
2129  case AVMEDIA_TYPE_AUDIO:
2130  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2131  ifilter->channels != frame->channels ||
2132  ifilter->channel_layout != frame->channel_layout;
2133  break;
2134  case AVMEDIA_TYPE_VIDEO:
2135  need_reinit |= ifilter->width != frame->width ||
2136  ifilter->height != frame->height;
2137  break;
2138  }
2139 
2140  if (!ifilter->ist->reinit_filters && fg->graph)
2141  need_reinit = 0;
2142 
 /* a change of hw frames context always forces a reinit */
2143  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2144  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2145  need_reinit = 1;
2146 
2147  if (need_reinit) {
2148  ret = ifilter_parameters_from_frame(ifilter, frame);
2149  if (ret < 0)
2150  return ret;
2151  }
2152 
2153  /* (re)init the graph if possible, otherwise buffer the frame and return */
2154  if (need_reinit || !fg->graph) {
2155  for (i = 0; i < fg->nb_inputs; i++) {
2156  if (!ifilter_has_all_input_formats(fg)) {
 /* not all inputs known yet: queue a clone of the frame for later */
2157  AVFrame *tmp = av_frame_clone(frame);
2158  if (!tmp)
2159  return AVERROR(ENOMEM);
2160  av_frame_unref(frame);
2161 
2162  if (!av_fifo_space(ifilter->frame_queue)) {
2163  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2164  if (ret < 0) {
2165  av_frame_free(&tmp);
2166  return ret;
2167  }
2168  }
2169  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2170  return 0;
2171  }
2172  }
2173 
 /* flush pending output through the old graph before rebuilding it */
2174  ret = reap_filters(1);
2175  if (ret < 0 && ret != AVERROR_EOF) {
2176  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2177  return ret;
2178  }
2179 
2180  ret = configure_filtergraph(fg);
2181  if (ret < 0) {
2182  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2183  return ret;
2184  }
2185  }
2186 
2188  if (ret < 0) {
2189  if (ret != AVERROR_EOF)
2190  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2191  return ret;
2192  }
2193 
2194  return 0;
2195 }
2196 
2197 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2198 {
2199  int ret;
2200 
2201  ifilter->eof = 1;
2202 
2203  if (ifilter->filter) {
2204  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2205  if (ret < 0)
2206  return ret;
2207  } else {
2208  // the filtergraph was never configured
2209  if (ifilter->format < 0)
2210  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2211  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2212  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2213  return AVERROR_INVALIDDATA;
2214  }
2215  }
2216 
2217  return 0;
2218 }
2219 
2220 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2221 // There is the following difference: if you got a frame, you must call
2222 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2223 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2224 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2225 {
2226  int ret;
2227 
2228  *got_frame = 0;
2229 
2230  if (pkt) {
2231  ret = avcodec_send_packet(avctx, pkt);
2232  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2233  // decoded frames with avcodec_receive_frame() until done.
2234  if (ret < 0 && ret != AVERROR_EOF)
2235  return ret;
2236  }
2237 
2238  ret = avcodec_receive_frame(avctx, frame);
2239  if (ret < 0 && ret != AVERROR(EAGAIN))
2240  return ret;
2241  if (ret >= 0)
2242  *got_frame = 1;
2243 
2244  return 0;
2245 }
2246 
2247 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2248 {
2249  int i, ret;
2250  AVFrame *f;
2251 
2252  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2253  for (i = 0; i < ist->nb_filters; i++) {
2254  if (i < ist->nb_filters - 1) {
2255  f = ist->filter_frame;
2256  ret = av_frame_ref(f, decoded_frame);
2257  if (ret < 0)
2258  break;
2259  } else
2260  f = decoded_frame;
2261  ret = ifilter_send_frame(ist->filters[i], f);
2262  if (ret == AVERROR_EOF)
2263  ret = 0; /* ignore */
2264  if (ret < 0) {
2266  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2267  break;
2268  }
2269  }
2270  return ret;
2271 }
2272 
2273 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2274  int *decode_failed)
2275 {
2276  AVFrame *decoded_frame;
2277  AVCodecContext *avctx = ist->dec_ctx;
2278  int ret, err = 0;
2279  AVRational decoded_frame_tb;
2280 
2281  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2282  return AVERROR(ENOMEM);
2283  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2284  return AVERROR(ENOMEM);
2285  decoded_frame = ist->decoded_frame;
2286 
2288  ret = decode(avctx, decoded_frame, got_output, pkt);
2289  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2290  if (ret < 0)
2291  *decode_failed = 1;
2292 
2293  if (ret >= 0 && avctx->sample_rate <= 0) {
2294  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2295  ret = AVERROR_INVALIDDATA;
2296  }
2297 
2298  if (ret != AVERROR_EOF)
2299  check_decode_result(ist, got_output, ret);
2300 
2301  if (!*got_output || ret < 0)
2302  return ret;
2303 
2304  ist->samples_decoded += decoded_frame->nb_samples;
2305  ist->frames_decoded++;
2306 
2307  /* increment next_dts to use for the case where the input stream does not
2308  have timestamps or there are multiple frames in the packet */
2309  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2310  avctx->sample_rate;
2311  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2312  avctx->sample_rate;
2313 
2314  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2315  decoded_frame_tb = ist->st->time_base;
2316  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2317  decoded_frame->pts = pkt->pts;
2318  decoded_frame_tb = ist->st->time_base;
2319  }else {
2320  decoded_frame->pts = ist->dts;
2321  decoded_frame_tb = AV_TIME_BASE_Q;
2322  }
2323  if (decoded_frame->pts != AV_NOPTS_VALUE)
2324  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2325  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2326  (AVRational){1, avctx->sample_rate});
2327  ist->nb_samples = decoded_frame->nb_samples;
2328  err = send_frame_to_filters(ist, decoded_frame);
2329 
2331  av_frame_unref(decoded_frame);
2332  return err < 0 ? err : ret;
2333 }
2334 
2335 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2336  int *decode_failed)
2337 {
2338  AVFrame *decoded_frame;
2339  int i, ret = 0, err = 0;
2340  int64_t best_effort_timestamp;
2341  int64_t dts = AV_NOPTS_VALUE;
2342  AVPacket avpkt;
2343 
2344  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2345  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2346  // skip the packet.
2347  if (!eof && pkt && pkt->size == 0)
2348  return 0;
2349 
2350  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2351  return AVERROR(ENOMEM);
2352  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2353  return AVERROR(ENOMEM);
2354  decoded_frame = ist->decoded_frame;
2355  if (ist->dts != AV_NOPTS_VALUE)
2356  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2357  if (pkt) {
2358  avpkt = *pkt;
2359  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2360  }
2361 
2362  // The old code used to set dts on the drain packet, which does not work
2363  // with the new API anymore.
2364  if (eof) {
2365  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2366  if (!new)
2367  return AVERROR(ENOMEM);
2368  ist->dts_buffer = new;
2369  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2370  }
2371 
2373  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2374  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2375  if (ret < 0)
2376  *decode_failed = 1;
2377 
2378  // The following line may be required in some cases where there is no parser
2379  // or the parser does not has_b_frames correctly
2380  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2381  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2382  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2383  } else
2385  "video_delay is larger in decoder than demuxer %d > %d.\n"
2386  "If you want to help, upload a sample "
2387  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2388  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2389  ist->dec_ctx->has_b_frames,
2390  ist->st->codecpar->video_delay);
2391  }
2392 
2393  if (ret != AVERROR_EOF)
2394  check_decode_result(ist, got_output, ret);
2395 
2396  if (*got_output && ret >= 0) {
2397  if (ist->dec_ctx->width != decoded_frame->width ||
2398  ist->dec_ctx->height != decoded_frame->height ||
2399  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2400  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2401  decoded_frame->width,
2402  decoded_frame->height,
2403  decoded_frame->format,
2404  ist->dec_ctx->width,
2405  ist->dec_ctx->height,
2406  ist->dec_ctx->pix_fmt);
2407  }
2408  }
2409 
2410  if (!*got_output || ret < 0)
2411  return ret;
2412 
2413  if(ist->top_field_first>=0)
2414  decoded_frame->top_field_first = ist->top_field_first;
2415 
2416  ist->frames_decoded++;
2417 
2418  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2419  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2420  if (err < 0)
2421  goto fail;
2422  }
2423  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2424 
2425  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2426  *duration_pts = decoded_frame->pkt_duration;
2427 
2428  if (ist->framerate.num)
2429  best_effort_timestamp = ist->cfr_next_pts++;
2430 
2431  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2432  best_effort_timestamp = ist->dts_buffer[0];
2433 
2434  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2435  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2436  ist->nb_dts_buffer--;
2437  }
2438 
2439  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2440  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2441 
2442  if (ts != AV_NOPTS_VALUE)
2443  ist->next_pts = ist->pts = ts;
2444  }
2445 
2446  if (debug_ts) {
2447  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2448  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2449  ist->st->index, av_ts2str(decoded_frame->pts),
2450  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2451  best_effort_timestamp,
2452  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2453  decoded_frame->key_frame, decoded_frame->pict_type,
2454  ist->st->time_base.num, ist->st->time_base.den);
2455  }
2456 
2457  if (ist->st->sample_aspect_ratio.num)
2458  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2459 
2460  err = send_frame_to_filters(ist, decoded_frame);
2461 
2462 fail:
2464  av_frame_unref(decoded_frame);
2465  return err < 0 ? err : ret;
2466 }
2467 
2468 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2469  int *decode_failed)
2470 {
2471  AVSubtitle subtitle;
2472  int free_sub = 1;
2473  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2474  &subtitle, got_output, pkt);
2475 
2476  check_decode_result(NULL, got_output, ret);
2477 
2478  if (ret < 0 || !*got_output) {
2479  *decode_failed = 1;
2480  if (!pkt->size)
2481  sub2video_flush(ist);
2482  return ret;
2483  }
2484 
2485  if (ist->fix_sub_duration) {
2486  int end = 1;
2487  if (ist->prev_sub.got_output) {
2488  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2489  1000, AV_TIME_BASE);
2490  if (end < ist->prev_sub.subtitle.end_display_time) {
2491  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2492  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2494  end <= 0 ? ", dropping it" : "");
2496  }
2497  }
2498  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2499  FFSWAP(int, ret, ist->prev_sub.ret);
2500  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2501  if (end <= 0)
2502  goto out;
2503  }
2504 
2505  if (!*got_output)
2506  return ret;
2507 
2508  if (ist->sub2video.frame) {
2509  sub2video_update(ist, &subtitle);
2510  } else if (ist->nb_filters) {
2511  if (!ist->sub2video.sub_queue)
2512  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2513  if (!ist->sub2video.sub_queue)
2514  exit_program(1);
2515  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2517  if (ret < 0)
2518  exit_program(1);
2519  }
2520  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2521  free_sub = 0;
2522  }
2523 
2524  if (!subtitle.num_rects)
2525  goto out;
2526 
2527  ist->frames_decoded++;
2528 
2529  for (i = 0; i < nb_output_streams; i++) {
2530  OutputStream *ost = output_streams[i];
2531 
2532  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2533  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2534  continue;
2535 
2536  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2537  }
2538 
2539 out:
2540  if (free_sub)
2541  avsubtitle_free(&subtitle);
2542  return ret;
2543 }
2544 
2546 {
2547  int i, ret;
2548  /* TODO keep pts also in stream time base to avoid converting back */
2549  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2551 
2552  for (i = 0; i < ist->nb_filters; i++) {
2553  ret = ifilter_send_eof(ist->filters[i], pts);
2554  if (ret < 0)
2555  return ret;
2556  }
2557  return 0;
2558 }
2559 
2560 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2561 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2562 {
2563  int ret = 0, i;
2564  int repeating = 0;
2565  int eof_reached = 0;
2566 
2567  AVPacket avpkt;
2568  if (!ist->saw_first_ts) {
2569  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2570  ist->pts = 0;
2571  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2572  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2573  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2574  }
2575  ist->saw_first_ts = 1;
2576  }
2577 
2578  if (ist->next_dts == AV_NOPTS_VALUE)
2579  ist->next_dts = ist->dts;
2580  if (ist->next_pts == AV_NOPTS_VALUE)
2581  ist->next_pts = ist->pts;
2582 
2583  if (!pkt) {
2584  /* EOF handling */
2585  av_init_packet(&avpkt);
2586  avpkt.data = NULL;
2587  avpkt.size = 0;
2588  } else {
2589  avpkt = *pkt;
2590  }
2591 
2592  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2593  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2594  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2595  ist->next_pts = ist->pts = ist->dts;
2596  }
2597 
2598  // while we have more to decode or while the decoder did output something on EOF
2599  while (ist->decoding_needed) {
2600  int64_t duration_dts = 0;
2601  int64_t duration_pts = 0;
2602  int got_output = 0;
2603  int decode_failed = 0;
2604 
2605  ist->pts = ist->next_pts;
2606  ist->dts = ist->next_dts;
2607 
2608  switch (ist->dec_ctx->codec_type) {
2609  case AVMEDIA_TYPE_AUDIO:
2610  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2611  &decode_failed);
2612  break;
2613  case AVMEDIA_TYPE_VIDEO:
2614  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2615  &decode_failed);
2616  if (!repeating || !pkt || got_output) {
2617  if (pkt && pkt->duration) {
2618  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2619  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2621  duration_dts = ((int64_t)AV_TIME_BASE *
2622  ist->dec_ctx->framerate.den * ticks) /
2624  }
2625 
2626  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2627  ist->next_dts += duration_dts;
2628  }else
2629  ist->next_dts = AV_NOPTS_VALUE;
2630  }
2631 
2632  if (got_output) {
2633  if (duration_pts > 0) {
2634  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2635  } else {
2636  ist->next_pts += duration_dts;
2637  }
2638  }
2639  break;
2640  case AVMEDIA_TYPE_SUBTITLE:
2641  if (repeating)
2642  break;
2643  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2644  if (!pkt && ret >= 0)
2645  ret = AVERROR_EOF;
2646  break;
2647  default:
2648  return -1;
2649  }
2650 
2651  if (ret == AVERROR_EOF) {
2652  eof_reached = 1;
2653  break;
2654  }
2655 
2656  if (ret < 0) {
2657  if (decode_failed) {
2658  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2659  ist->file_index, ist->st->index, av_err2str(ret));
2660  } else {
2661  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2662  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2663  }
2664  if (!decode_failed || exit_on_error)
2665  exit_program(1);
2666  break;
2667  }
2668 
2669  if (got_output)
2670  ist->got_output = 1;
2671 
2672  if (!got_output)
2673  break;
2674 
2675  // During draining, we might get multiple output frames in this loop.
2676  // ffmpeg.c does not drain the filter chain on configuration changes,
2677  // which means if we send multiple frames at once to the filters, and
2678  // one of those frames changes configuration, the buffered frames will
2679  // be lost. This can upset certain FATE tests.
2680  // Decode only 1 frame per call on EOF to appease these FATE tests.
2681  // The ideal solution would be to rewrite decoding to use the new
2682  // decoding API in a better way.
2683  if (!pkt)
2684  break;
2685 
2686  repeating = 1;
2687  }
2688 
2689  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2690  /* except when looping we need to flush but not to send an EOF */
2691  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2692  int ret = send_filter_eof(ist);
2693  if (ret < 0) {
2694  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2695  exit_program(1);
2696  }
2697  }
2698 
2699  /* handle stream copy */
2700  if (!ist->decoding_needed && pkt) {
2701  ist->dts = ist->next_dts;
2702  switch (ist->dec_ctx->codec_type) {
2703  case AVMEDIA_TYPE_AUDIO:
2704  av_assert1(pkt->duration >= 0);
2705  if (ist->dec_ctx->sample_rate) {
2706  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2707  ist->dec_ctx->sample_rate;
2708  } else {
2709  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2710  }
2711  break;
2712  case AVMEDIA_TYPE_VIDEO:
2713  if (ist->framerate.num) {
2714  // TODO: Remove work-around for c99-to-c89 issue 7
2715  AVRational time_base_q = AV_TIME_BASE_Q;
2716  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2717  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2718  } else if (pkt->duration) {
2719  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2720  } else if(ist->dec_ctx->framerate.num != 0) {
2721  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2722  ist->next_dts += ((int64_t)AV_TIME_BASE *
2723  ist->dec_ctx->framerate.den * ticks) /
2725  }
2726  break;
2727  }
2728  ist->pts = ist->dts;
2729  ist->next_pts = ist->next_dts;
2730  }
2731  for (i = 0; i < nb_output_streams; i++) {
2732  OutputStream *ost = output_streams[i];
2733 
2734  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2735  continue;
2736 
2737  do_streamcopy(ist, ost, pkt);
2738  }
2739 
2740  return !eof_reached;
2741 }
2742 
2743 static void print_sdp(void)
2744 {
2745  char sdp[16384];
2746  int i;
2747  int j;
2748  AVIOContext *sdp_pb;
2749  AVFormatContext **avc;
2750 
2751  for (i = 0; i < nb_output_files; i++) {
2752  if (!output_files[i]->header_written)
2753  return;
2754  }
2755 
2756  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2757  if (!avc)
2758  exit_program(1);
2759  for (i = 0, j = 0; i < nb_output_files; i++) {
2760  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2761  avc[j] = output_files[i]->ctx;
2762  j++;
2763  }
2764  }
2765 
2766  if (!j)
2767  goto fail;
2768 
2769  av_sdp_create(avc, j, sdp, sizeof(sdp));
2770 
2771  if (!sdp_filename) {
2772  printf("SDP:\n%s\n", sdp);
2773  fflush(stdout);
2774  } else {
2775  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2776  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2777  } else {
2778  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2779  avio_closep(&sdp_pb);
2781  }
2782  }
2783 
2784 fail:
2785  av_freep(&avc);
2786 }
2787 
2789 {
2790  InputStream *ist = s->opaque;
2791  const enum AVPixelFormat *p;
2792  int ret;
2793 
2794  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2796  const AVCodecHWConfig *config = NULL;
2797  int i;
2798 
2799  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2800  break;
2801 
2802  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2803  ist->hwaccel_id == HWACCEL_AUTO) {
2804  for (i = 0;; i++) {
2805  config = avcodec_get_hw_config(s->codec, i);
2806  if (!config)
2807  break;
2808  if (!(config->methods &
2810  continue;
2811  if (config->pix_fmt == *p)
2812  break;
2813  }
2814  }
2815  if (config) {
2816  if (config->device_type != ist->hwaccel_device_type) {
2817  // Different hwaccel offered, ignore.
2818  continue;
2819  }
2820 
2821  ret = hwaccel_decode_init(s);
2822  if (ret < 0) {
2823  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2825  "%s hwaccel requested for input stream #%d:%d, "
2826  "but cannot be initialized.\n",
2828  ist->file_index, ist->st->index);
2829  return AV_PIX_FMT_NONE;
2830  }
2831  continue;
2832  }
2833  } else {
2834  const HWAccel *hwaccel = NULL;
2835  int i;
2836  for (i = 0; hwaccels[i].name; i++) {
2837  if (hwaccels[i].pix_fmt == *p) {
2838  hwaccel = &hwaccels[i];
2839  break;
2840  }
2841  }
2842  if (!hwaccel) {
2843  // No hwaccel supporting this pixfmt.
2844  continue;
2845  }
2846  if (hwaccel->id != ist->hwaccel_id) {
2847  // Does not match requested hwaccel.
2848  continue;
2849  }
2850 
2851  ret = hwaccel->init(s);
2852  if (ret < 0) {
2854  "%s hwaccel requested for input stream #%d:%d, "
2855  "but cannot be initialized.\n", hwaccel->name,
2856  ist->file_index, ist->st->index);
2857  return AV_PIX_FMT_NONE;
2858  }
2859  }
2860 
2861  if (ist->hw_frames_ctx) {
2863  if (!s->hw_frames_ctx)
2864  return AV_PIX_FMT_NONE;
2865  }
2866 
2867  ist->hwaccel_pix_fmt = *p;
2868  break;
2869  }
2870 
2871  return *p;
2872 }
2873 
2875 {
2876  InputStream *ist = s->opaque;
2877 
2878  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2879  return ist->hwaccel_get_buffer(s, frame, flags);
2880 
2881  return avcodec_default_get_buffer2(s, frame, flags);
2882 }
2883 
2884 static int init_input_stream(int ist_index, char *error, int error_len)
2885 {
2886  int ret;
2887  InputStream *ist = input_streams[ist_index];
2888 
2889  if (ist->decoding_needed) {
2890  AVCodec *codec = ist->dec;
2891  if (!codec) {
2892  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2893  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2894  return AVERROR(EINVAL);
2895  }
2896 
2897  ist->dec_ctx->opaque = ist;
2898  ist->dec_ctx->get_format = get_format;
2899  ist->dec_ctx->get_buffer2 = get_buffer;
2900  ist->dec_ctx->thread_safe_callbacks = 1;
2901 
2902  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2903  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2904  (ist->decoding_needed & DECODING_FOR_OST)) {
2905  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2907  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2908  }
2909 
2910  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2911 
2912  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2913  * audio, and video decoders such as cuvid or mediacodec */
2914  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2915 
2916  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2917  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2918  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2920  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2921 
2922  ret = hw_device_setup_for_decode(ist);
2923  if (ret < 0) {
2924  snprintf(error, error_len, "Device setup failed for "
2925  "decoder on input stream #%d:%d : %s",
2926  ist->file_index, ist->st->index, av_err2str(ret));
2927  return ret;
2928  }
2929 
2930  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2931  if (ret == AVERROR_EXPERIMENTAL)
2932  abort_codec_experimental(codec, 0);
2933 
2934  snprintf(error, error_len,
2935  "Error while opening decoder for input stream "
2936  "#%d:%d : %s",
2937  ist->file_index, ist->st->index, av_err2str(ret));
2938  return ret;
2939  }
2941  }
2942 
2943  ist->next_pts = AV_NOPTS_VALUE;
2944  ist->next_dts = AV_NOPTS_VALUE;
2945 
2946  return 0;
2947 }
2948 
2950 {
2951  if (ost->source_index >= 0)
2952  return input_streams[ost->source_index];
2953  return NULL;
2954 }
2955 
/* qsort()-style comparator for int64_t values: returns -1, 0 or 1 as
 * *a is less than, equal to or greater than *b (overflow-safe, unlike
 * subtracting the operands). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2960 
2961 /* open the muxer when all the streams are initialized */
2962 static int check_init_output_file(OutputFile *of, int file_index)
2963 {
2964  int ret, i;
2965 
2966  for (i = 0; i < of->ctx->nb_streams; i++) {
2967  OutputStream *ost = output_streams[of->ost_index + i];
2968  if (!ost->initialized)
2969  return 0;
2970  }
2971 
2972  of->ctx->interrupt_callback = int_cb;
2973 
2974  ret = avformat_write_header(of->ctx, &of->opts);
2975  if (ret < 0) {
2977  "Could not write header for output file #%d "
2978  "(incorrect codec parameters ?): %s\n",
2979  file_index, av_err2str(ret));
2980  return ret;
2981  }
2982  //assert_avoptions(of->opts);
2983  of->header_written = 1;
2984 
2985  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2986 
2987  if (sdp_filename || want_sdp)
2988  print_sdp();
2989 
2990  /* flush the muxing queues */
2991  for (i = 0; i < of->ctx->nb_streams; i++) {
2992  OutputStream *ost = output_streams[of->ost_index + i];
2993 
2994  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2995  if (!av_fifo_size(ost->muxing_queue))
2996  ost->mux_timebase = ost->st->time_base;
2997 
2998  while (av_fifo_size(ost->muxing_queue)) {
2999  AVPacket pkt;
3000  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3001  write_packet(of, &pkt, ost, 1);
3002  }
3003  }
3004 
3005  return 0;
3006 }
3007 
3009 {
3010  AVBSFContext *ctx;
3011  int i, ret;
3012 
3013  if (!ost->nb_bitstream_filters)
3014  return 0;
3015 
3016  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3017  ctx = ost->bsf_ctx[i];
3018 
3019  ret = avcodec_parameters_copy(ctx->par_in,
3020  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3021  if (ret < 0)
3022  return ret;
3023 
3024  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3025 
3026  ret = av_bsf_init(ctx);
3027  if (ret < 0) {
3028  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3029  ost->bsf_ctx[i]->filter->name);
3030  return ret;
3031  }
3032  }
3033 
3034  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3035  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3036  if (ret < 0)
3037  return ret;
3038 
3039  ost->st->time_base = ctx->time_base_out;
3040 
3041  return 0;
3042 }
3043 
3045 {
3046  OutputFile *of = output_files[ost->file_index];
3047  InputStream *ist = get_input_stream(ost);
3048  AVCodecParameters *par_dst = ost->st->codecpar;
3049  AVCodecParameters *par_src = ost->ref_par;
3050  AVRational sar;
3051  int i, ret;
3052  uint32_t codec_tag = par_dst->codec_tag;
3053 
3054  av_assert0(ist && !ost->filter);
3055 
3056  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3057  if (ret >= 0)
3058  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3059  if (ret < 0) {
3061  "Error setting up codec context options.\n");
3062  return ret;
3063  }
3064 
3065  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3066  if (ret < 0) {
3068  "Error getting reference codec parameters.\n");
3069  return ret;
3070  }
3071 
3072  if (!codec_tag) {
3073  unsigned int codec_tag_tmp;
3074  if (!of->ctx->oformat->codec_tag ||
3075  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3076  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3077  codec_tag = par_src->codec_tag;
3078  }
3079 
3080  ret = avcodec_parameters_copy(par_dst, par_src);
3081  if (ret < 0)
3082  return ret;
3083 
3084  par_dst->codec_tag = codec_tag;
3085 
3086  if (!ost->frame_rate.num)
3087  ost->frame_rate = ist->framerate;
3088  ost->st->avg_frame_rate = ost->frame_rate;
3089 
3091  if (ret < 0)
3092  return ret;
3093 
3094  // copy timebase while removing common factors
3095  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3097 
3098  // copy estimated duration as a hint to the muxer
3099  if (ost->st->duration <= 0 && ist->st->duration > 0)
3100  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3101 
3102  // copy disposition
3103  ost->st->disposition = ist->st->disposition;
3104 
3105  if (ist->st->nb_side_data) {
3106  for (i = 0; i < ist->st->nb_side_data; i++) {
3107  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3108  uint8_t *dst_data;
3109 
3110  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3111  if (!dst_data)
3112  return AVERROR(ENOMEM);
3113  memcpy(dst_data, sd_src->data, sd_src->size);
3114  }
3115  }
3116 
3117  if (ost->rotate_overridden) {
3119  sizeof(int32_t) * 9);
3120  if (sd)
3122  }
3123 
3124  switch (par_dst->codec_type) {
3125  case AVMEDIA_TYPE_AUDIO:
3126  if (audio_volume != 256) {
3127  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3128  exit_program(1);
3129  }
3130  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3131  par_dst->block_align= 0;
3132  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3133  par_dst->block_align= 0;
3134  break;
3135  case AVMEDIA_TYPE_VIDEO:
3136  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3137  sar =
3139  (AVRational){ par_dst->height, par_dst->width });
3140  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3141  "with stream copy may produce invalid files\n");
3142  }
3143  else if (ist->st->sample_aspect_ratio.num)
3144  sar = ist->st->sample_aspect_ratio;
3145  else
3146  sar = par_src->sample_aspect_ratio;
3147  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3148  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3149  ost->st->r_frame_rate = ist->st->r_frame_rate;
3150  break;
3151  }
3152 
3153  ost->mux_timebase = ist->st->time_base;
3154 
3155  return 0;
3156 }
3157 
/* NOTE(review): extracted listing — the signature line was dropped by the
 * extraction; in FFmpeg this is set_encoder_id(OutputFile *of, OutputStream *ost)
 * — confirm against the full source.
 * Writes an "encoder" metadata tag ("<lavc ident> <encoder name>") on the
 * output stream, unless the user already supplied one, or only
 * "Lavc <name>" when bitexact output was requested. */
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* an explicitly provided encoder tag wins */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* pick up -fflags bitexact from the muxer options */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* pick up -flags bitexact from the encoder options */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    /* only advertise the exact library version when not in bitexact mode */
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                /* [elided line: av_dict_set flags argument — confirm] */
}
3198 
3199 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3200  AVCodecContext *avctx)
3201 {
3202  char *p;
3203  int n = 1, i, size, index = 0;
3204  int64_t t, *pts;
3205 
3206  for (p = kf; *p; p++)
3207  if (*p == ',')
3208  n++;
3209  size = n;
3210  pts = av_malloc_array(size, sizeof(*pts));
3211  if (!pts) {
3212  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3213  exit_program(1);
3214  }
3215 
3216  p = kf;
3217  for (i = 0; i < n; i++) {
3218  char *next = strchr(p, ',');
3219 
3220  if (next)
3221  *next++ = 0;
3222 
3223  if (!memcmp(p, "chapters", 8)) {
3224 
3225  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3226  int j;
3227 
3228  if (avf->nb_chapters > INT_MAX - size ||
3229  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3230  sizeof(*pts)))) {
3232  "Could not allocate forced key frames array.\n");
3233  exit_program(1);
3234  }
3235  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3236  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3237 
3238  for (j = 0; j < avf->nb_chapters; j++) {
3239  AVChapter *c = avf->chapters[j];
3240  av_assert1(index < size);
3241  pts[index++] = av_rescale_q(c->start, c->time_base,
3242  avctx->time_base) + t;
3243  }
3244 
3245  } else {
3246 
3247  t = parse_time_or_die("force_key_frames", p, 1);
3248  av_assert1(index < size);
3249  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3250 
3251  }
3252 
3253  p = next;
3254  }
3255 
3256  av_assert0(index == size);
3257  qsort(pts, size, sizeof(*pts), compare_int64);
3258  ost->forced_kf_count = size;
3259  ost->forced_kf_pts = pts;
3260 }
3261 
3262 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3263 {
3264  InputStream *ist = get_input_stream(ost);
3265  AVCodecContext *enc_ctx = ost->enc_ctx;
3266  AVFormatContext *oc;
3267 
3268  if (ost->enc_timebase.num > 0) {
3269  enc_ctx->time_base = ost->enc_timebase;
3270  return;
3271  }
3272 
3273  if (ost->enc_timebase.num < 0) {
3274  if (ist) {
3275  enc_ctx->time_base = ist->st->time_base;
3276  return;
3277  }
3278 
3279  oc = output_files[ost->file_index]->ctx;
3280  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3281  }
3282 
3283  enc_ctx->time_base = default_time_base;
3284 }
3285 
/* NOTE(review): extracted listing — the signature (presumably
 * static int init_output_stream_encode(OutputStream *ost)) and a number of
 * interior lines were dropped by the extraction; gaps are marked below.
 * Prepares the encoder context from the filter sink / input stream:
 * frame rate, time base, dimensions, formats and forced key frames. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* [elided line: declaration of dec_ctx — used below, confirm] */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* inherit per-stream properties from the decoded input */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no input stream: look for a sibling stream of the same type */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                /* [elided lines: rest of condition and body — confirm] */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* resolve the output frame rate, in decreasing order of preference */
        if (!ost->frame_rate.num)
            /* [elided line: frame rate from the buffersink — confirm] */
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* [elided line: av_log(NULL, AV_LOG_WARNING, head — confirm] */
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        /* snap to the nearest frame rate the encoder supports */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* [elided line: sample_fmt taken from the buffersink — confirm] */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        /* [elided lines: sample_rate / channel layout setup — confirm] */

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* [elided line: init_encoder_time_base(...) for video — confirm] */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            /* [elided line: time base from the buffersink — confirm] */
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            /* [elided line: remainder of the condition — confirm] */
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* rescale the forced key frame list into the final encoder time base */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 /* [elided line: source time base — confirm] */
                                                 enc_ctx->time_base);

        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* [elided line: SAR from the buffersink — confirm] */

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* [elided line: body — confirm against full source] */
        }

        /* explicit -top override of the field order */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* [elided lines: expression parse into forced_keyframes_pexpr — confirm] */
                if (ret < 0) {
                    /* [elided line: av_log head — confirm] */
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* [elided lines: expression evaluation state init — confirm] */

                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* [elided line: parse_forced_key_frames(...) call — confirm] */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* fall back to the input dimensions for bitmap subtitles */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3443 
/* Open and fully initialise one output stream: run the encoder setup (or
 * streamcopy setup), apply user-provided dispositions, set up bitstream
 * filters, and trigger muxer init once all streams are ready.
 * On failure a human-readable message is written into error[].
 * NOTE(review): extracted listing — a few interior lines were dropped by
 * the extraction; gaps are marked below. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* [elided line: subtitle_header allocation — confirm] */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* [elided line: subtitle_header_size assignment — confirm] */
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate for encoders that declare no defaults */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* [elided lines: hw format check; hw_frames_ctx ref — confirm] */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }
        /* subtitle conversion is only supported within the same class
         * (text->text or bitmap->bitmap) */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                /* [elided line: avcodec_descriptor_get(...) for the decoder — confirm] */
            AVCodecDescriptor const *output_descriptor =
                /* [elided line: avcodec_descriptor_get(...) for the encoder — confirm] */
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* [elided line: av_buffersink_set_frame_size(..., — confirm] */
                                         ost->enc_ctx->frame_size);
        /* a sub-1000 bitrate almost always means a kbit/s value was intended */
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* [elided line: presumably copy params from enc_ctx to st->codecpar — confirm] */
        if (ret < 0) {
            /* [elided line: av_log(NULL, AV_LOG_FATAL, head — confirm] */
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* mirror encoder-generated global side data onto the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* autorotate is applied by the filter chain, so neutralize
                 * the copied display matrix */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        /* minimal AVClass so av_opt_eval_flags can resolve the table above */
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* write the header as soon as every stream of this file is ready */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3638 
/* Warn once per stream index when a packet arrives for a stream that
 * appeared only after the initial stream discovery of its input file. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* only warn about indices we have not warned about yet */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* [elided line: media-type string argument — confirm] */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3653 
/* One-time setup before the main transcode loop: resolve complex-filtergraph
 * source streams, start rate-emulation clocks, open decoders and
 * (non-filtered) encoders, discard unused programs, write headers for
 * stream-less outputs and dump the stream mapping.
 * NOTE(review): extracted listing — one line near the end was dropped. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* attach a representative source stream to outputs fed by
     * single-input complex filtergraphs */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* tear down any encoders opened so far before bailing out */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep the program if any of its streams is in use */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* "native" = the codec's name matches the generic one */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* [elided line: presumably marks transcode init as done — confirm] */

    return 0;
}
3825 
3826 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3827 static int need_output(void)
3828 {
3829  int i;
3830 
3831  for (i = 0; i < nb_output_streams; i++) {
3832  OutputStream *ost = output_streams[i];
3833  OutputFile *of = output_files[ost->file_index];
3834  AVFormatContext *os = output_files[ost->file_index]->ctx;
3835 
3836  if (ost->finished ||
3837  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3838  continue;
3839  if (ost->frame_number >= ost->max_frames) {
3840  int j;
3841  for (j = 0; j < of->ctx->nb_streams; j++)
3842  close_output_stream(output_streams[of->ost_index + j]);
3843  continue;
3844  }
3845 
3846  return 1;
3847  }
3848 
3849  return 0;
3850 }
3851 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
/* NOTE(review): signature line elided by the extraction — presumably
 * static OutputStream *choose_output(void); confirm against full source. */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* last muxed DTS in microseconds; streams without a DTS yet sort first */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            /* [elided line: av_log head (level unknown) — confirm] */
                   "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                   ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* not-yet-initialized streams must be fed first so the muxer can start */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* otherwise pick the unfinished stream that is furthest behind */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3883 
/* Enable (on != 0) or disable terminal echo on stdin where termios is
 * available; a no-op elsewhere. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;
    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term);
#endif
}
3895 
/* Poll and act on interactive tty key commands ('q', '+', '-', 's', 'h',
 * 'c'/'C', 'd'/'D', '?'), rate-limited to one poll per 100ms.
 * Returns AVERROR_EXIT when the user asked to quit, 0 otherwise.
 * NOTE(review): extracted listing — two lines were dropped; gaps marked. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h'){
        /* cycle: off -> pkt dump -> pkt+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* [elided line — confirm against full source] */
    }
    if (key == 'c' || key == 'C'){
        /* send ('c') or queue ('C') a filter command read from the tty */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* command scheduled for a future timestamp */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* [elided line: av_log head — confirm] */
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D' cycles through debug flag values, skipping crashing ones */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                           |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                           )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd' reads a numeric debug value from the tty */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
4012 
4013 #if HAVE_THREADS
/* Per-input-file demuxer thread: reads packets from the format context and
 * forwards them to the main thread through a message queue. Exits on read
 * error/EOF or when the queue reports an error. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* propagate EOF/error to the consumer side and stop */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full: permanently fall back to blocking sends and warn */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* [elided line: av_log(f->ctx, AV_LOG_WARNING, head — confirm] */
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            /* the packet was not consumed, release it here */
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4054 
/* Stop and join the reader thread of input file i, draining and
 * unreffing any packets still queued. No-op if no thread was started. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* [elided line: presumably signals EOF to the sending side — confirm] */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4070 
4071 static void free_input_threads(void)
4072 {
4073  int i;
4074 
4075  for (i = 0; i < nb_input_files; i++)
4076  free_input_thread(i);
4077 }
4078 
4079 static int init_input_thread(int i)
4080 {
4081  int ret;
4082  InputFile *f = input_files[i];
4083 
4084  if (nb_input_files == 1)
4085  return 0;
4086 
4087  if (f->ctx->pb ? !f->ctx->pb->seekable :
4088  strcmp(f->ctx->iformat->name, "lavfi"))
4089  f->non_blocking = 1;
4090  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4091  f->thread_queue_size, sizeof(AVPacket));
4092  if (ret < 0)
4093  return ret;
4094 
4095  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4096  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4097  av_thread_message_queue_free(&f->in_thread_queue);
4098  return AVERROR(ret);
4099  }
4100 
4101  return 0;
4102 }
4103 
4104 static int init_input_threads(void)
4105 {
4106  int i, ret;
4107 
4108  for (i = 0; i < nb_input_files; i++) {
4109  ret = init_input_thread(i);
4110  if (ret < 0)
4111  return ret;
4112  }
4113  return 0;
4114 }
4115 
/* Fetch one packet from the input file's reader-thread queue; uses a
 * non-blocking receive for sources flagged non_blocking. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        /* [elided line: nonblock flag : 0); — confirm] */
}
4122 #endif
4123 
/* NOTE(review): signature line elided by the extraction — presumably
 * static int get_input_packet(InputFile *f, AVPacket *pkt); confirm.
 * Honors -re rate emulation (returns EAGAIN while data would arrive
 * from the "future"), then reads a packet either from the reader thread
 * or directly from the demuxer. */
{
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            /* wall-clock pacing: compare stream DTS to elapsed real time */
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4143 
4144 static int got_eagain(void)
4145 {
4146  int i;
4147  for (i = 0; i < nb_output_streams; i++)
4148  if (output_streams[i]->unavailable)
4149  return 1;
4150  return 0;
4151 }
4152 
4153 static void reset_eagain(void)
4154 {
4155  int i;
4156  for (i = 0; i < nb_input_files; i++)
4157  input_files[i]->eagain = 0;
4158  for (i = 0; i < nb_output_streams; i++)
4159  output_streams[i]->unavailable = 0;
4160 }
4161 
4162 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4163 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4164  AVRational time_base)
4165 {
4166  int ret;
4167 
4168  if (!*duration) {
4169  *duration = tmp;
4170  return tmp_time_base;
4171  }
4172 
4173  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4174  if (ret < 0) {
4175  *duration = tmp;
4176  return tmp_time_base;
4177  }
4178 
4179  return time_base;
4180 }
4181 
/* NOTE(review): signature line elided by the extraction — presumably
 * static int seek_to_start(InputFile *ifile, AVFormatContext *is); confirm.
 * Rewinds an input file for -stream_loop and accumulates its total
 * duration (including an estimate of the last frame's length) so the next
 * iteration's timestamps can be offset. Decrements the remaining loop count. */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            /* last-frame length from the decoded sample count */
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* last-frame length from the best available frame rate */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4239 
4240 /*
4241  * Return
4242  * - 0 -- one packet was read and processed
4243  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4244  * this function should be called again
4245  * - AVERROR_EOF -- this function should not be called again
4246  */
/* Read one packet from the given input file and feed it to decoding /
 * streamcopy, applying stream-loop rewinds, timestamp offsetting,
 * wraparound correction and discontinuity handling along the way.
 * Returns 0 on a processed packet, AVERROR(EAGAIN) to retry, and maps
 * EOF to EAGAIN after marking the file eof_reached (see comment above).
 * NOTE(review): several statements of this body are elided in this
 * view (e.g. the declaration of 'is' and parts of the av_log calls);
 * comments below describe only what the visible code establishes. */
4247 static int process_input(int file_index)
4248 {
4249  InputFile *ifile = input_files[file_index];
4251  InputStream *ist;
4252  AVPacket pkt;
4253  int ret, thread_ret, i, j;
4254  int64_t duration;
4255  int64_t pkt_dts;
4256 
4257  is = ifile->ctx;
4258  ret = get_input_packet(ifile, &pkt);
4259 
4260  if (ret == AVERROR(EAGAIN)) {
4261  ifile->eagain = 1;
4262  return ret;
4263  }
/* Read failed but -stream_loop is active: flush every decoder, seek
 * back to the start and retry the read once. The input thread must be
 * torn down around the seek and restarted afterwards. */
4264  if (ret < 0 && ifile->loop) {
4265  AVCodecContext *avctx;
4266  for (i = 0; i < ifile->nb_streams; i++) {
4267  ist = input_streams[ifile->ist_index + i];
4268  avctx = ist->dec_ctx;
4269  if (ist->decoding_needed) {
4270  ret = process_input_packet(ist, NULL, 1);
4271  if (ret>0)
4272  return 0;
4273  avcodec_flush_buffers(avctx);
4274  }
4275  }
4276 #if HAVE_THREADS
4277  free_input_thread(file_index);
4278 #endif
4279  ret = seek_to_start(ifile, is);
4280 #if HAVE_THREADS
4281  thread_ret = init_input_thread(file_index);
4282  if (thread_ret < 0)
4283  return thread_ret;
4284 #endif
4285  if (ret < 0)
4286  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4287  else
4288  ret = get_input_packet(ifile, &pkt);
4289  if (ret == AVERROR(EAGAIN)) {
4290  ifile->eagain = 1;
4291  return ret;
4292  }
4293  }
/* Definite end of input (or error): flush decoders once more, mark all
 * non-lavfi outputs fed by this file as finished, and report EAGAIN so
 * the caller moves on to other files. */
4294  if (ret < 0) {
4295  if (ret != AVERROR_EOF) {
4296  print_error(is->url, ret);
4297  if (exit_on_error)
4298  exit_program(1);
4299  }
4300 
4301  for (i = 0; i < ifile->nb_streams; i++) {
4302  ist = input_streams[ifile->ist_index + i];
4303  if (ist->decoding_needed) {
4304  ret = process_input_packet(ist, NULL, 0);
4305  if (ret>0)
4306  return 0;
4307  }
4308 
4309  /* mark all outputs that don't go through lavfi as finished */
4310  for (j = 0; j < nb_output_streams; j++) {
4311  OutputStream *ost = output_streams[j];
4312 
4313  if (ost->source_index == ifile->ist_index + i &&
4314  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4315  finish_output_stream(ost);
4316  }
4317  }
4318 
4319  ifile->eof_reached = 1;
4320  return AVERROR(EAGAIN);
4321  }
4322 
4323  reset_eagain();
4324 
4325  if (do_pkt_dump) {
4327  is->streams[pkt.stream_index]);
4328  }
4329  /* the following test is needed in case new streams appear
4330  dynamically in stream : we ignore them */
4331  if (pkt.stream_index >= ifile->nb_streams) {
4332  report_new_stream(file_index, &pkt);
4333  goto discard_packet;
4334  }
4335 
4336  ist = input_streams[ifile->ist_index + pkt.stream_index];
4337 
/* Per-stream statistics for the status line / -stats. */
4338  ist->data_size += pkt.size;
4339  ist->nb_packets++;
4340 
4341  if (ist->discard)
4342  goto discard_packet;
4343 
4344  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4346  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4347  if (exit_on_error)
4348  exit_program(1);
4349  }
4350 
4351  if (debug_ts) {
4352  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4353  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4357  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4358  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4359  av_ts2str(input_files[ist->file_index]->ts_offset),
4360  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4361  }
4362 
/* One-time pts_wrap correction: if a timestamp sits more than half the
 * wrap range above the start time, subtract one full wrap period. */
4363  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4364  int64_t stime, stime2;
4365  // Correcting starttime based on the enabled streams
4366  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4367  // so we instead do it here as part of discontinuity handling
4368  if ( ist->next_dts == AV_NOPTS_VALUE
4369  && ifile->ts_offset == -is->start_time
4370  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4371  int64_t new_start_time = INT64_MAX;
4372  for (i=0; i<is->nb_streams; i++) {
4373  AVStream *st = is->streams[i];
4374  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4375  continue;
4376  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4377  }
4378  if (new_start_time > is->start_time) {
4379  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4380  ifile->ts_offset = -new_start_time;
4381  }
4382  }
4383 
4384  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4385  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4386  ist->wrap_correction_done = 1;
4387 
4388  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4389  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4390  ist->wrap_correction_done = 0;
4391  }
4392  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4393  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4394  ist->wrap_correction_done = 0;
4395  }
4396  }
4397 
4398  /* add the stream-global side data to the first packet */
4399  if (ist->nb_packets == 1) {
4400  for (i = 0; i < ist->st->nb_side_data; i++) {
4401  AVPacketSideData *src_sd = &ist->st->side_data[i];
4402  uint8_t *dst_data;
4403 
/* Display matrices are handled via the frame path, not copied here. */
4404  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4405  continue;
4406 
4407  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4408  continue;
4409 
4410  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4411  if (!dst_data)
4412  exit_program(1);
4413 
4414  memcpy(dst_data, src_sd->data, src_sd->size);
4415  }
4416  }
4417 
/* Apply the per-file timestamp offset and the -itsscale factor. */
4418  if (pkt.dts != AV_NOPTS_VALUE)
4419  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4420  if (pkt.pts != AV_NOPTS_VALUE)
4421  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4422 
4423  if (pkt.pts != AV_NOPTS_VALUE)
4424  pkt.pts *= ist->ts_scale;
4425  if (pkt.dts != AV_NOPTS_VALUE)
4426  pkt.dts *= ist->ts_scale;
4427 
/* Inter-stream discontinuity check against the file's last seen
 * timestamp, for discontinuous formats before any stream dts exists. */
4429  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4431  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4432  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4433  int64_t delta = pkt_dts - ifile->last_ts;
4434  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4435  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4436  ifile->ts_offset -= delta;
4438  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4439  delta, ifile->ts_offset);
4440  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4441  if (pkt.pts != AV_NOPTS_VALUE)
4442  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4443  }
4444  }
4445 
/* -stream_loop: shift timestamps by the accumulated file duration so
 * each loop iteration continues where the previous one ended. */
4446  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4447  if (pkt.pts != AV_NOPTS_VALUE) {
4448  pkt.pts += duration;
4449  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4450  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4451  }
4452 
4453  if (pkt.dts != AV_NOPTS_VALUE)
4454  pkt.dts += duration;
4455 
/* Per-stream discontinuity check against the predicted next dts:
 * discontinuous formats get the offset corrected, others drop the
 * out-of-range timestamps instead. */
4457  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4459  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4460  !copy_ts) {
4461  int64_t delta = pkt_dts - ist->next_dts;
4462  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4463  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4464  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4465  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4466  ifile->ts_offset -= delta;
4468  "timestamp discontinuity for stream #%d:%d "
4469  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4470  ist->file_index, ist->st->index, ist->st->id,
4472  delta, ifile->ts_offset);
4473  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4474  if (pkt.pts != AV_NOPTS_VALUE)
4475  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4476  }
4477  } else {
4478  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4479  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4480  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4481  pkt.dts = AV_NOPTS_VALUE;
4482  }
4483  if (pkt.pts != AV_NOPTS_VALUE){
4484  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4485  delta = pkt_pts - ist->next_dts;
4486  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4487  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4488  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4489  pkt.pts = AV_NOPTS_VALUE;
4490  }
4491  }
4492  }
4493  }
4494 
4495  if (pkt.dts != AV_NOPTS_VALUE)
4496  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4497 
4498  if (debug_ts) {
4499  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4501  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4502  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4503  av_ts2str(input_files[ist->file_index]->ts_offset),
4504  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4505  }
4506 
4507  sub2video_heartbeat(ist, pkt.pts);
4508 
4509  process_input_packet(ist, &pkt, 0);
4510 
4511 discard_packet:
4512  av_packet_unref(&pkt);
4513 
4514  return 0;
4515 }
4516 
4517 /**
4518  * Perform a step of transcoding for the specified filter graph.
4519  *
4520  * @param[in] graph filter graph to consider
4521  * @param[out] best_ist input stream where a frame would allow to continue
4522  * @return 0 for success, <0 for error
4523  */
4524 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4525 {
4526  int i, ret;
4527  int nb_requests, nb_requests_max = 0;
4528  InputFilter *ifilter;
4529  InputStream *ist;
4530 
4531  *best_ist = NULL;
4532  ret = avfilter_graph_request_oldest(graph->graph);
4533  if (ret >= 0)
4534  return reap_filters(0);
4535 
4536  if (ret == AVERROR_EOF) {
4537  ret = reap_filters(1);
4538  for (i = 0; i < graph->nb_outputs; i++)
4539  close_output_stream(graph->outputs[i]->ost);
4540  return ret;
4541  }
4542  if (ret != AVERROR(EAGAIN))
4543  return ret;
4544 
4545  for (i = 0; i < graph->nb_inputs; i++) {
4546  ifilter = graph->inputs[i];
4547  ist = ifilter->ist;
4548  if (input_files[ist->file_index]->eagain ||
4549  input_files[ist->file_index]->eof_reached)
4550  continue;
4551  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4552  if (nb_requests > nb_requests_max) {
4553  nb_requests_max = nb_requests;
4554  *best_ist = ist;
4555  }
4556  }
4557 
4558  if (!*best_ist)
4559  for (i = 0; i < graph->nb_outputs; i++)
4560  graph->outputs[i]->ost->unavailable = 1;