FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity used by cmdutils (banner, version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Log file for per-frame video statistics (-vstats); opened lazily. */
112 static FILE *vstats_file;
113 
/* Variable names available in -force_key_frames expressions, terminated
 * by NULL; order must match the value array built by the video encoder. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Runtime state/counters shared across the transcode loop. */
134 static int run_as_daemon = 0;
/* Frame duplication/drop bookkeeping for -vsync handling and the status line. */
135 static int nb_frames_dup = 0;
/* Threshold at which a "More than N frames duplicated" warning is printed;
 * multiplied by 10 after each warning. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
/* decode_error_stat[0]=decoded ok, [1]=errors — NOTE(review): indices presumed
 * from name; the code reading/writing it is outside this excerpt. */
138 static int64_t decode_error_stat[2];
139 
/* Whether an SDP description should still be printed (RTP outputs). */
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
/* Terminal settings saved by term_init() and restored by term_exit_sigsafe();
 * restore_tty is set only after a successful tcgetattr(). */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
/* Defined later in this file; joins/frees the per-input-file reader threads. */
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* Prepare ist->sub2video.frame as a blank (zeroed) canvas sized from the
 * decoder context, falling back to the configured sub2video dimensions.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the signature line (176) and the pixel-format assignment
 * (line 184) are missing from this extraction — presumably
 * "static int sub2video_get_blank_frame(InputStream *ist)"; confirm against
 * the original source. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* Only plane 0 is cleared: the canvas is a packed 32-bit RGB format. */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas frame, stamped with the given pts, into
 * every buffer source filter fed by this input stream, and remember the pts
 * so heartbeats can tell whether the subtitle picture is already ahead.
 * NOTE(review): the flag arguments of av_buffersrc_add_frame_flags()
 * (lines 232-233) are missing from this extraction. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
/* EOF from a filter is not an error here: the graph may already be closed. */
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Re-render the sub2video canvas from a decoded subtitle (or blank it when
 * sub is NULL) and push it into the filter graphs with the appropriate pts.
 * NOTE(review): the signature line (240) is missing from this extraction —
 * presumably "static void sub2video_update(InputStream *ist, AVSubtitle *sub)";
 * line 262 (first half of the av_log call) is missing as well. */
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
/* Convert the subtitle's display window from AV_TIME_BASE to stream tb. */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
/* NULL sub: emit a blank frame that lasts until further notice. */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Restore the terminal to the settings saved by term_init(). Kept
 * async-signal-safe (only tcsetattr()) so it can run from sigterm_handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
327 
/* Public terminal teardown: silence logging, then restore the tty.
 * NOTE(review): line 331 is missing from this extraction — presumably a
 * call to term_exit_sigsafe(); confirm against the original source. */
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
/* Signal/exit state shared between the signal handler and the main loop.
 * NOTE(review): line 336 between these declarations is missing from this
 * extraction (a hyperlinked declaration was dropped). */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
/* Set once cleanup has finished; CtrlHandler() spins on it on Windows. */
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
/* Signal handler: record which signal arrived and hard-exit after too many.
 * NOTE(review): lines 341 and 345-346 are missing from this extraction —
 * presumably the "sigterm_handler(int sig)" signature and the
 * received_nb_signals++ / term_exit_sigsafe() calls; confirm against the
 * original source. */
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
/* After 3 signals the user clearly wants out: bypass cleanup entirely. */
347  if(received_nb_signals > 3) {
/* write() is async-signal-safe, unlike fprintf(). */
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Put the controlling terminal into raw-ish mode (so single keypresses can
 * be read without echo) and install the signal/console handlers.
 * NOTE(review): line 390 is missing from this extraction — the stray
 * closing brace at line 408 suggests it opened a conditional, presumably
 * "if (!run_as_daemon) {"; confirm against the original source. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
/* Save current settings so term_exit_sigsafe() can restore them. */
393  oldtty = tty;
394  restore_tty = 1;
395 
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
/* Disable echo and canonical (line-buffered) input. */
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
/* Block until at least one byte is available, with no timeout. */
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* read a key without blocking */
/* Returns the key's byte value if one is pending, -1 if none (or on a
 * closed input pipe), or read()'s return value on a short/failed read.
 * Three compiled variants: POSIX select()-based, Windows pipe-aware
 * (PeekNamedPipe), and plain conio kbhit()/getch(). */
425 static int read_key(void)
426 {
427  unsigned char ch;
428 #if HAVE_TERMIOS_H
429  int n = 1;
430  struct timeval tv;
431  fd_set rfds;
432 
/* Zero timeout: poll stdin without blocking. */
433  FD_ZERO(&rfds);
434  FD_SET(0, &rfds);
435  tv.tv_sec = 0;
436  tv.tv_usec = 0;
437  n = select(1, &rfds, NULL, NULL, &tv);
438  if (n > 0) {
439  n = read(0, &ch, 1);
440  if (n == 1)
441  return ch;
442 
443  return n;
444  }
445 #elif HAVE_KBHIT
446 # if HAVE_PEEKNAMEDPIPE
/* Static: the pipe/console detection is done once per process. */
447  static int is_pipe;
448  static HANDLE input_handle;
449  DWORD dw, nchars;
450  if(!input_handle){
451  input_handle = GetStdHandle(STD_INPUT_HANDLE);
452  is_pipe = !GetConsoleMode(input_handle, &dw);
453  }
454 
455  if (is_pipe) {
456  /* When running under a GUI, you will end here. */
457  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458  // input pipe may have been closed by the program that ran ffmpeg
459  return -1;
460  }
461  //Read it
462  if(nchars != 0) {
463  read(0, &ch, 1);
464  return ch;
465  }else{
466  return -1;
467  }
468  }
469 # endif
470  if(kbhit())
471  return(getch());
472 #endif
473  return -1;
474 }
475 
/* AVIOInterruptCB callback: a non-zero return aborts blocking lavf I/O.
 * NOTE(review): the return statement (line 478) is missing from this
 * extraction — presumably it tests received_nb_signals against the
 * transcode-init state; confirm against the original source. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown registered for program exit: drains and frees filter
 * graphs, output files/streams, input files/streams, closes the vstats
 * file, and restores the terminal. `ret` is the pending exit status, used
 * only to decide whether to print "Conversion failed!".
 * NOTE(review): several hyperlinked lines are missing from this extraction
 * (e.g. 494, 506, 510, 512, 531, 536, 542, 557, 562, 566, 571-573, 580,
 * 595-599, 604, 611, 615, 624) — mostly additional av_freep()/free calls
 * and declarations; confirm against the original source. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
/* Drain every filtergraph: queued frames, queued subtitles, then the
 * input/output descriptors themselves. */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
498  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499  sizeof(frame), NULL);
500  av_frame_free(&frame);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
527  av_freep(&filtergraphs[i]);
528  }
529  av_freep(&filtergraphs);
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
/* Only close the AVIOContext for formats that actually opened a file. */
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
545  av_freep(&output_files[i]);
546  }
547  for (i = 0; i < nb_output_streams; i++) {
548  OutputStream *ost = output_streams[i];
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570 
573 
/* Packets queued before the muxer header was written must be unreffed. */
574  if (ost->muxing_queue) {
575  while (av_fifo_size(ost->muxing_queue)) {
576  AVPacket pkt;
577  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578  av_packet_unref(&pkt);
579  }
581  }
582 
583  av_freep(&output_streams[i]);
584  }
585 #if HAVE_THREADS
586  free_input_threads();
587 #endif
588  for (i = 0; i < nb_input_files; i++) {
589  avformat_close_input(&input_files[i]->ctx);
590  av_freep(&input_files[i]);
591  }
592  for (i = 0; i < nb_input_streams; i++) {
593  InputStream *ist = input_streams[i];
594 
597  av_dict_free(&ist->decoder_opts);
600  av_freep(&ist->filters);
601  av_freep(&ist->hwaccel_device);
602  av_freep(&ist->dts_buffer);
603 
605 
606  av_freep(&input_streams[i]);
607  }
608 
609  if (vstats_file) {
/* fclose() flushes; a failure here may mean lost statistics. */
610  if (fclose(vstats_file))
612  "Error closing vstats file, loss of information possible: %s\n",
613  av_err2str(AVERROR(errno)));
614  }
616 
617  av_freep(&input_streams);
618  av_freep(&input_files);
619  av_freep(&output_streams);
620  av_freep(&output_files);
621 
622  uninit_opts();
623 
625 
626  if (received_sigterm) {
627  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628  (int) received_sigterm);
629  } else if (ret && atomic_load(&transcode_init_done)) {
630  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
631  }
632  term_exit();
/* Lets the Windows CtrlHandler() know it may stop stalling. */
633  ffmpeg_exited = 1;
634 }
635 
/* Remove from dictionary *a every key that also appears in b (used to strip
 * consumed options before the leftover check in assert_avoptions()).
 * NOTE(review): the signature line (636) and the loop body (line 641,
 * presumably av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE)) are missing
 * from this extraction; confirm against the original source. */
637 {
638  AVDictionaryEntry *t = NULL;
639 
640  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642  }
643 }
644 
/* Abort with a fatal log if any option in dictionary m was not consumed.
 * NOTE(review): the signature line (645, presumably
 * "static void assert_avoptions(AVDictionary *m)") and the declaration of
 * t (line 647) are missing from this extraction. */
646 {
648  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
650  exit_program(1);
651  }
652 }
653 
654 static void abort_codec_experimental(AVCodec *c, int encoder)
655 {
656  exit_program(1);
657 }
658 
/* With -benchmark_all, log the user/sys/real time consumed since the last
 * call, labelled by the printf-style fmt; always refresh current_time.
 * A NULL fmt just resets the reference point without logging.
 * NOTE(review): lines 662 and 670 are missing from this extraction —
 * presumably the declaration/initialization of `t` (a BenchmarkTimeStamps
 * snapshot) and the first half of the av_log() call; confirm against the
 * original source. */
659 static void update_benchmark(const char *fmt, ...)
660 {
661  if (do_benchmark_all) {
663  va_list va;
664  char buf[1024];
665 
666  if (fmt) {
667  va_start(va, fmt);
668  vsnprintf(buf, sizeof(buf), fmt, va);
669  va_end(va);
671  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672  t.user_usec - current_time.user_usec,
673  t.sys_usec - current_time.sys_usec,
674  t.real_usec - current_time.real_usec, buf);
675  }
676  current_time = t;
677  }
678 }
679 
/* Mark every output stream finished: `ost` gets this_stream OR-ed into its
 * finished flags, all other streams get `others`.
 * NOTE(review): the signature line (680) is missing from this extraction —
 * presumably "static void close_all_output_streams(OutputStream *ost,
 * OSTFinished this_stream, OSTFinished others)"; confirm against the
 * original source. */
681 {
682  int i;
683  for (i = 0; i < nb_output_streams; i++) {
684  OutputStream *ost2 = output_streams[i];
685  ost2->finished |= ost == ost2 ? this_stream : others;
686  }
687 }
688 
/* Hand one finished packet to the muxer: enforce -frames limits, buffer the
 * packet if the muxer header is not written yet, fill video side-data stats,
 * rescale/sanitize timestamps, and finally interleave-write it. The packet
 * is always consumed. `unqueue` is set when replaying a previously buffered
 * packet so it is not counted twice against max_frames.
 * NOTE(review): several hyperlinked lines are missing from this extraction
 * (718, 735-736, 741, 775, 808, 819) — among them the condition guarding the
 * NOPTS reset, the av_packet_get_side_data() call that initializes `sd`, the
 * "else if" opening the audio/video monotonicity branch, and an argument of
 * the debug_ts log; confirm against the original source. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
690 {
691  AVFormatContext *s = of->ctx;
692  AVStream *st = ost->st;
693  int ret;
694 
695  /*
696  * Audio encoders may split the packets -- #frames in != #packets out.
697  * But there is no reordering, so we can limit the number of output packets
698  * by simply dropping them here.
699  * Counting encoded video frames needs to be done separately because of
700  * reordering, see do_video_out().
701  * Do not count the packet when unqueued because it has been counted when queued.
702  */
703  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704  if (ost->frame_number >= ost->max_frames) {
705  av_packet_unref(pkt);
706  return;
707  }
708  ost->frame_number++;
709  }
710 
711  if (!of->header_written) {
712  AVPacket tmp_pkt = {0};
713  /* the muxer is not initialized yet, buffer the packet */
714  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically up to max_muxing_queue_size; abort if full. */
715  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716  ost->max_muxing_queue_size);
717  if (new_size <= av_fifo_size(ost->muxing_queue)) {
719  "Too many packets buffered for output stream %d:%d.\n",
720  ost->file_index, ost->st->index);
721  exit_program(1);
722  }
723  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
724  if (ret < 0)
725  exit_program(1);
726  }
727  ret = av_packet_make_refcounted(pkt);
728  if (ret < 0)
729  exit_program(1);
730  av_packet_move_ref(&tmp_pkt, pkt);
731  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
732  return;
733  }
734 
737  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
738 
739  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
740  int i;
742  NULL);
/* Quality/picture-type/error stats come from AV_PKT_DATA_QUALITY_STATS
 * side data (sd); -1/NONE when absent. */
743  ost->quality = sd ? AV_RL32(sd) : -1;
744  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
745 
746  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
747  if (sd && i < sd[5])
748  ost->error[i] = AV_RL64(sd + 8 + 8*i);
749  else
750  ost->error[i] = -1;
751  }
752 
753  if (ost->frame_rate.num && ost->is_cfr) {
754  if (pkt->duration > 0)
755  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
757  ost->mux_timebase);
758  }
759  }
760 
761  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
762 
763  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764  if (pkt->dts != AV_NOPTS_VALUE &&
765  pkt->pts != AV_NOPTS_VALUE &&
766  pkt->dts > pkt->pts) {
767  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
768  pkt->dts, pkt->pts,
769  ost->file_index, ost->st->index);
/* Replace both timestamps with the median of {pts, dts, last_dts+1}. */
770  pkt->pts =
771  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
774  }
776  pkt->dts != AV_NOPTS_VALUE &&
777  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778  ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS. */
779  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780  if (pkt->dts < max) {
781  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782  av_log(s, loglevel, "Non-monotonous DTS in output stream "
783  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
785  if (exit_on_error) {
786  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
787  exit_program(1);
788  }
789  av_log(s, loglevel, "changing to %"PRId64". This may result "
790  "in incorrect timestamps in the output file.\n",
791  max);
792  if (pkt->pts >= pkt->dts)
793  pkt->pts = FFMAX(pkt->pts, max);
794  pkt->dts = max;
795  }
796  }
797  }
798  ost->last_mux_dts = pkt->dts;
799 
800  ost->data_size += pkt->size;
801  ost->packets_written++;
802 
803  pkt->stream_index = ost->index;
804 
805  if (debug_ts) {
806  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
811  pkt->size
812  );
813  }
814 
815  ret = av_interleaved_write_frame(s, pkt);
816  if (ret < 0) {
817  print_error("av_interleaved_write_frame()", ret);
818  main_return_code = 1;
820  }
821  av_packet_unref(pkt);
822 }
823 
/* Mark this output stream's encoder as finished; with -shortest, clamp the
 * whole file's recording time to the stream's current end timestamp.
 * NOTE(review): the signature line (824) is missing from this extraction —
 * presumably "static void close_output_stream(OutputStream *ost)". */
825 {
826  OutputFile *of = output_files[ost->file_index];
827 
828  ost->finished |= ENCODER_FINISHED;
829  if (of->shortest) {
830  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831  of->recording_time = FFMIN(of->recording_time, end);
832  }
833 }
834 
835 /*
836  * Send a single packet to the output, applying any bitstream filters
837  * associated with the output stream. This may result in any number
838  * of packets actually being written, depending on what bitstream
839  * filters are applied. The supplied packet is consumed and will be
840  * blank (as if newly-allocated) when this function returns.
841  *
842  * If eof is set, instead indicate EOF to all bitstream filters and
843  * therefore flush any delayed packets to the output. A blank packet
844  * must be supplied in this case.
845  */
/* (contract described in the comment block above)
 * NOTE(review): the first line of the signature (846) is missing from this
 * extraction — presumably "static void output_packet(OutputFile *of,
 * AVPacket *pkt,"; confirm against the original source. */
847  OutputStream *ost, int eof)
848 {
849  int ret = 0;
850 
851  /* apply the output bitstream filters, if any */
852  if (ost->nb_bitstream_filters) {
853  int idx;
854 
/* Feed the first filter; NULL packet signals EOF/flush. */
855  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
856  if (ret < 0)
857  goto finish;
858 
/* idx is the 1-based position of the filter currently being drained;
 * when it drops back to 0 the whole chain is out of data. */
859  eof = 0;
860  idx = 1;
861  while (idx) {
862  /* get a packet from the previous filter up the chain */
863  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864  if (ret == AVERROR(EAGAIN)) {
865  ret = 0;
866  idx--;
867  continue;
868  } else if (ret == AVERROR_EOF) {
869  eof = 1;
870  } else if (ret < 0)
871  goto finish;
872 
873  /* send it to the next filter down the chain or to the muxer */
874  if (idx < ost->nb_bitstream_filters) {
875  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
878  idx++;
879  eof = 0;
880  } else if (eof)
881  goto finish;
882  else
883  write_packet(of, pkt, ost, 0);
884  }
885  } else if (!eof)
886  write_packet(of, pkt, ost, 0);
887 
888 finish:
889  if (ret < 0 && ret != AVERROR_EOF) {
890  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
892  if(exit_on_error)
893  exit_program(1);
894  }
895 }
896 
/* Return 1 while the stream is still within the file's -t recording time;
 * once the limit is reached, close the stream and return 0.
 * NOTE(review): the signature line (897, presumably
 * "static int check_recording_time(OutputStream *ost)") and line 902 (the
 * av_compare_ts() call comparing the stream position against
 * of->recording_time) are missing from this extraction. */
898 {
899  OutputFile *of = output_files[ost->file_index];
900 
901  if (of->recording_time != INT64_MAX &&
903  AV_TIME_BASE_Q) >= 0) {
904  close_output_stream(ost);
905  return 0;
906  }
907  return 1;
908 }
909 
/* Encode one audio frame and mux every packet the encoder returns,
 * advancing ost->sync_opts by the frame's sample count. Exits the program
 * on encoder errors.
 * NOTE(review): the first signature line (910, presumably
 * "static void do_audio_out(OutputFile *of, OutputStream *ost,") and
 * line 931 are missing from this extraction. */
911  AVFrame *frame)
912 {
913  AVCodecContext *enc = ost->enc_ctx;
914  AVPacket pkt;
915  int ret;
916 
917  av_init_packet(&pkt);
918  pkt.data = NULL;
919  pkt.size = 0;
920 
921  if (!check_recording_time(ost))
922  return;
923 
/* Without a usable pts (or with -async disabled), continue from the
 * running sample counter. */
924  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925  frame->pts = ost->sync_opts;
926  ost->sync_opts = frame->pts + frame->nb_samples;
927  ost->samples_encoded += frame->nb_samples;
928  ost->frames_encoded++;
929 
930  av_assert0(pkt.size || !pkt.data);
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936  enc->time_base.num, enc->time_base.den);
937  }
938 
939  ret = avcodec_send_frame(enc, frame);
940  if (ret < 0)
941  goto error;
942 
/* Drain the encoder until it needs more input (EAGAIN). */
943  while (1) {
944  ret = avcodec_receive_packet(enc, &pkt);
945  if (ret == AVERROR(EAGAIN))
946  break;
947  if (ret < 0)
948  goto error;
949 
950  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
951 
952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
953 
954  if (debug_ts) {
955  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
959  }
960 
961  output_packet(of, &pkt, ost, 0);
962  }
963 
964  return;
965 error:
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968 }
969 
970 static void do_subtitle_out(OutputFile *of,
971  OutputStream *ost,
972  AVSubtitle *sub)
973 {
974  int subtitle_out_max_size = 1024 * 1024;
975  int subtitle_out_size, nb, i;
976  AVCodecContext *enc;
977  AVPacket pkt;
978  int64_t pts;
979 
980  if (sub->pts == AV_NOPTS_VALUE) {
981  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
982  if (exit_on_error)
983  exit_program(1);
984  return;
985  }
986 
987  enc = ost->enc_ctx;
988 
989  if (!subtitle_out) {
990  subtitle_out = av_malloc(subtitle_out_max_size);
991  if (!subtitle_out) {
992  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
993  exit_program(1);
994  }
995  }
996 
997  /* Note: DVB subtitle need one packet to draw them and one other
998  packet to clear them */
999  /* XXX: signal it in the codec context ? */
1000  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1001  nb = 2;
1002  else
1003  nb = 1;
1004 
1005  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1006  pts = sub->pts;
1007  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008  pts -= output_files[ost->file_index]->start_time;
1009  for (i = 0; i < nb; i++) {
1010  unsigned save_num_rects = sub->num_rects;
1011 
1012  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013  if (!check_recording_time(ost))
1014  return;
1015 
1016  sub->pts = pts;
1017  // start_display_time is required to be 0
1018  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019  sub->end_display_time -= sub->start_display_time;
1020  sub->start_display_time = 0;
1021  if (i == 1)
1022  sub->num_rects = 0;
1023 
1024  ost->frames_encoded++;
1025 
1026  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027  subtitle_out_max_size, sub);
1028  if (i == 1)
1029  sub->num_rects = save_num_rects;
1030  if (subtitle_out_size < 0) {
1031  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1032  exit_program(1);
1033  }
1034 
1035  av_init_packet(&pkt);
1036  pkt.data = subtitle_out;
1037  pkt.size = subtitle_out_size;
1038  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041  /* XXX: the pts correction is handled here. Maybe handling
1042  it in the codec would be better */
1043  if (i == 0)
1044  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1045  else
1046  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1047  }
1048  pkt.dts = pkt.pts;
1049  output_packet(of, &pkt, ost, 0);
1050  }
1051 }
1052 
1053 static void do_video_out(OutputFile *of,
1054  OutputStream *ost,
1055  AVFrame *next_picture,
1056  double sync_ipts)
1057 {
1058  int ret, format_video_sync;
1059  AVPacket pkt;
1060  AVCodecContext *enc = ost->enc_ctx;
1061  AVCodecParameters *mux_par = ost->st->codecpar;
1062  AVRational frame_rate;
1063  int nb_frames, nb0_frames, i;
1064  double delta, delta0;
1065  double duration = 0;
1066  int frame_size = 0;
1067  InputStream *ist = NULL;
1069 
1070  if (ost->source_index >= 0)
1071  ist = input_streams[ost->source_index];
1072 
1073  frame_rate = av_buffersink_get_frame_rate(filter);
1074  if (frame_rate.num > 0 && frame_rate.den > 0)
1075  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1076 
1077  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1079 
1080  if (!ost->filters_script &&
1081  !ost->filters &&
1082  next_picture &&
1083  ist &&
1084  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1086  }
1087 
1088  if (!next_picture) {
1089  //end, flushing
1090  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091  ost->last_nb0_frames[1],
1092  ost->last_nb0_frames[2]);
1093  } else {
1094  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095  delta = delta0 + duration;
1096 
1097  /* by default, we output a single frame */
1098  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1099  nb_frames = 1;
1100 
1101  format_video_sync = video_sync_method;
1102  if (format_video_sync == VSYNC_AUTO) {
1103  if(!strcmp(of->ctx->oformat->name, "avi")) {
1104  format_video_sync = VSYNC_VFR;
1105  } else
1106  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1107  if ( ist
1108  && format_video_sync == VSYNC_CFR
1109  && input_files[ist->file_index]->ctx->nb_streams == 1
1110  && input_files[ist->file_index]->input_ts_offset == 0) {
1111  format_video_sync = VSYNC_VSCFR;
1112  }
1113  if (format_video_sync == VSYNC_CFR && copy_ts) {
1114  format_video_sync = VSYNC_VSCFR;
1115  }
1116  }
1117  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1118 
1119  if (delta0 < 0 &&
1120  delta > 0 &&
1121  format_video_sync != VSYNC_PASSTHROUGH &&
1122  format_video_sync != VSYNC_DROP) {
1123  if (delta0 < -0.6) {
1124  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1125  } else
1126  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127  sync_ipts = ost->sync_opts;
1128  duration += delta0;
1129  delta0 = 0;
1130  }
1131 
1132  switch (format_video_sync) {
1133  case VSYNC_VSCFR:
1134  if (ost->frame_number == 0 && delta0 >= 0.5) {
1135  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1136  delta = duration;
1137  delta0 = 0;
1138  ost->sync_opts = lrint(sync_ipts);
1139  }
1140  case VSYNC_CFR:
1141  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1143  nb_frames = 0;
1144  } else if (delta < -1.1)
1145  nb_frames = 0;
1146  else if (delta > 1.1) {
1147  nb_frames = lrintf(delta);
1148  if (delta0 > 1.1)
1149  nb0_frames = lrintf(delta0 - 0.6);
1150  }
1151  break;
1152  case VSYNC_VFR:
1153  if (delta <= -0.6)
1154  nb_frames = 0;
1155  else if (delta > 0.6)
1156  ost->sync_opts = lrint(sync_ipts);
1157  break;
1158  case VSYNC_DROP:
1159  case VSYNC_PASSTHROUGH:
1160  ost->sync_opts = lrint(sync_ipts);
1161  break;
1162  default:
1163  av_assert0(0);
1164  }
1165  }
1166 
1167  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168  nb0_frames = FFMIN(nb0_frames, nb_frames);
1169 
1170  memmove(ost->last_nb0_frames + 1,
1171  ost->last_nb0_frames,
1172  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173  ost->last_nb0_frames[0] = nb0_frames;
1174 
1175  if (nb0_frames == 0 && ost->last_dropped) {
1176  nb_frames_drop++;
1178  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179  ost->frame_number, ost->st->index, ost->last_frame->pts);
1180  }
1181  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1182  if (nb_frames > dts_error_threshold * 30) {
1183  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1184  nb_frames_drop++;
1185  return;
1186  }
1187  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189  if (nb_frames_dup > dup_warning) {
1190  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1191  dup_warning *= 10;
1192  }
1193  }
1194  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1195 
1196  /* duplicates frame if needed */
1197  for (i = 0; i < nb_frames; i++) {
1198  AVFrame *in_picture;
1199  int forced_keyframe = 0;
1200  double pts_time;
1201  av_init_packet(&pkt);
1202  pkt.data = NULL;
1203  pkt.size = 0;
1204 
1205  if (i < nb0_frames && ost->last_frame) {
1206  in_picture = ost->last_frame;
1207  } else
1208  in_picture = next_picture;
1209 
1210  if (!in_picture)
1211  return;
1212 
1213  in_picture->pts = ost->sync_opts;
1214 
1215  if (!check_recording_time(ost))
1216  return;
1217 
1219  ost->top_field_first >= 0)
1220  in_picture->top_field_first = !!ost->top_field_first;
1221 
1222  if (in_picture->interlaced_frame) {
1223  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1224  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1225  else
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1227  } else
1228  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1229 
1230  in_picture->quality = enc->global_quality;
1231  in_picture->pict_type = 0;
1232 
1233  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1234  in_picture->pts != AV_NOPTS_VALUE)
1235  ost->forced_kf_ref_pts = in_picture->pts;
1236 
1237  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1238  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1239  if (ost->forced_kf_index < ost->forced_kf_count &&
1240  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1241  ost->forced_kf_index++;
1242  forced_keyframe = 1;
1243  } else if (ost->forced_keyframes_pexpr) {
1244  double res;
1245  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1254  res);
1255  if (res) {
1256  forced_keyframe = 1;
1262  }
1263 
1265  } else if ( ost->forced_keyframes
1266  && !strncmp(ost->forced_keyframes, "source", 6)
1267  && in_picture->key_frame==1) {
1268  forced_keyframe = 1;
1269  }
1270 
1271  if (forced_keyframe) {
1272  in_picture->pict_type = AV_PICTURE_TYPE_I;
1273  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1274  }
1275 
1277  if (debug_ts) {
1278  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1279  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1280  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1281  enc->time_base.num, enc->time_base.den);
1282  }
1283 
1284  ost->frames_encoded++;
1285 
1286  ret = avcodec_send_frame(enc, in_picture);
1287  if (ret < 0)
1288  goto error;
1289  // Make sure Closed Captions will not be duplicated
1291 
1292  while (1) {
1293  ret = avcodec_receive_packet(enc, &pkt);
1294  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295  if (ret == AVERROR(EAGAIN))
1296  break;
1297  if (ret < 0)
1298  goto error;
1299 
1300  if (debug_ts) {
1301  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1305  }
1306 
1307  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308  pkt.pts = ost->sync_opts;
1309 
1310  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1311 
1312  if (debug_ts) {
1313  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1317  }
1318 
1319  frame_size = pkt.size;
1320  output_packet(of, &pkt, ost, 0);
1321 
1322  /* if two pass, output log */
1323  if (ost->logfile && enc->stats_out) {
1324  fprintf(ost->logfile, "%s", enc->stats_out);
1325  }
1326  }
1327  ost->sync_opts++;
1328  /*
1329  * For video, number of frames in == number of packets out.
1330  * But there may be reordering, so we can't throw away frames on encoder
1331  * flush, we need to limit them here, before they go into encoder.
1332  */
1333  ost->frame_number++;
1334 
1335  if (vstats_filename && frame_size)
1336  do_video_stats(ost, frame_size);
1337  }
1338 
1339  if (!ost->last_frame)
1340  ost->last_frame = av_frame_alloc();
1341  av_frame_unref(ost->last_frame);
1342  if (next_picture && ost->last_frame)
1343  av_frame_ref(ost->last_frame, next_picture);
1344  else
1345  av_frame_free(&ost->last_frame);
1346 
1347  return;
1348 error:
1349  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1350  exit_program(1);
1351 }
1352 
1353 static double psnr(double d)
1354 {
1355  return -10.0 * log10(d);
1356 }
1357 
{   /* do_video_stats() body — the signature line is missing from this extract
     * (presumably: static void do_video_stats(OutputStream *ost, int frame_size)
     * — TODO confirm against the full source). Appends one line per encoded
     * video frame to the file named by -vstats_file. */
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1); /* stats were explicitly requested; failure is fatal */
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 1 omits the output file/stream indices from each line */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else  {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* ost->error[0] is the luma squared-error accumulator; normalize by
           pixel count * 255^2 before converting to dB */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01) /* clamp to avoid division by ~0 in avg_bitrate below */
            ti1 = 0.01;

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1400 
1401 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1402 
{   /* finish_output_stream() body — the signature line is missing from this
     * extract (presumably: static void finish_output_stream(OutputStream *ost)
     * — TODO confirm). */
    OutputFile *of = output_files[ost->file_index];
    int i;

    /* NOTE(review): a statement (original line 1408) is missing here in this
       extract — presumably it marks 'ost' itself as finished. */

    /* with -shortest, one stream ending terminates every stream of the file */
    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1415 
1416 /**
1417  * Get and encode new output from any of the filtergraphs, without causing
1418  * activity.
1419  *
1420  * @return 0 for success, <0 for severe errors
1421  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of 'filter' (original line 1431,
           presumably "AVFilterContext *filter;") is missing from this extract. */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not (yet) configured */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* lazily initialize the output stream on first reaped frame */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* ost->filtered_frame is reused across calls as scratch storage */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            /* NOTE(review): the continuation carrying the flags argument
               (original line 1457) is missing from this extract. */
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log(...) call opening (original
                       line 1460) is missing from this extract. */
                        "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NOTE(review): a guard line (original 1463) is missing —
                       presumably restricting this flush to video sinks. */
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* stream already done: discard the frame, keep draining the sink */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator so float_pts keeps extra
                   fractional precision compared to the integer pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): the av_log(...) call opening (original
                       line 1508) is missing from this extract. */
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1525 
/* Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and verbose per-stream demux/mux statistics. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0; /* negative means "unknown" below */
    int i, j;
    int pass1_used = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the condition guarding this assignment (original lines
           1545-1546) is missing from this extract — presumably it tests the
           encoder's two-pass flags. TODO confirm against the full source. */
        pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw stream payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0; /* shadows the parameter on purpose */

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1642 
/* Emit the periodic (or final) status line to stderr and, when -progress is
 * active, a machine-readable key=value report to progress_avio. Throttled to
 * one report per 500ms unless this is the last report. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script; /* buf: human-readable line; buf_script: -progress output */
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1; /* max end-pts across streams; +1 so FFABS is safe */
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* rate-limit intermediate reports to every 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0; /* wall-clock seconds elapsed */


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the initialization of 'buf' (original line 1684, presumably
       av_bprint_init(&buf, ...)) is missing from this extract. */
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* additional video streams after the first only report their q */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* the boolean (fps < 9.95) selects 0 or 1 decimal places via %.*f */
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* log2-compressed histogram, one hex digit per bucket */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4; /* chroma planes are quarter-sized — assumes 4:2:0 subsampling */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): a guard line (original 1752) is missing from this
           extract — presumably skipping streams with no end pts yet. */
        pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                      ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split pts (microseconds) into H:M:S.us components */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* '\r' overwrites the line in place; '\n' finishes it on the last report */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): the av_log(...) call opening (original line
                   1833) is missing from this extract. */
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1841 
{   /* ifilter_parameters_from_codecpar() body — the signature line is missing
     * from this extract (presumably: static void
     * ifilter_parameters_from_codecpar(InputFilter *ifilter,
     * AVCodecParameters *par) — TODO confirm). Seeds the filter input's
     * media parameters from the demuxer-provided codec parameters. */
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1854 
/* Drain every active encoder at end of input: send a NULL frame to each and
 * mux all remaining packets, writing two-pass stats as they appear. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log(...) call opening (original line 1873)
               is missing from this extract. */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* fall back to codecpar-derived parameters for inputs whose
                   format was never discovered */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the condition guarding this 'continue'
                   (original line 1885) is missing from this extract —
                   presumably it checks ifilter_has_all_input_formats(fg). */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* raw/PCM-like audio (frame_size <= 1) needs no draining */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        /* NOTE(review): the condition guarding this 'continue' (original line
           1908) is missing from this extract — presumably it skips non
           audio/video codec types. */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): a statement (original line 1931) is missing from
               this extract — presumably update_benchmark(NULL). */

            /* EAGAIN means the encoder wants input: feed the NULL (flush) frame */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* two-pass: append encoder statistics to the pass log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* fully drained: emit the EOF packet to flush the muxer side */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            /* NOTE(review): the opening of the condition around
               do_video_stats (original line 1964) is missing from this
               extract — presumably a video + vstats_filename check. */
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1970 
1971 /*
1972  * Check whether a packet from ist should be written into ost at this time
1973  */
{   /* check_output_constraints() body — the signature line is missing from
     * this extract (presumably: static int check_output_constraints(
     * InputStream *ist, OutputStream *ost) — TODO confirm). Returns 1 when a
     * packet from ist should be written into ost now, 0 otherwise. */
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* ost must be mapped from exactly this input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* honor -ss on the output file: drop packets before its start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1990 
{   /* do_streamcopy() body — the signature line is missing from this extract
     * (presumably: static void do_streamcopy(InputStream *ist,
     * OutputStream *ost, const AVPacket *pkt) — TODO confirm). Copies one
     * demuxed packet to the output with timestamps rescaled into the muxer
     * timebase; pkt == NULL flushes the output bitstream filters. */
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt = { 0 };

    av_init_packet(&opkt);

    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* wait for the first keyframe before starting the copy */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        /* NOTE(review): the second operand of this && (original line 2008) is
           missing from this extract — presumably
           !ost->copy_initial_nonkeyframes. */
        return;

    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        /* compare in ist->pts (AV_TIME_BASE) when pkt has no pts of its own */
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* -t on the output file reached: close the stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* -t on the input file reached: close the stream */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration/initialization of 'duration'
           (original line 2053) is missing from this extract — presumably
           derived via av_get_audio_frame_duration(). */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
        /* NOTE(review): intermediate argument lines of av_rescale_delta
           (original line 2057) are missing from this extract. */
                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;

    /* share the payload by reference when possible; abort on OOM */
    if (pkt->buf) {
        opkt.buf = av_buffer_ref(pkt->buf);
        if (!opkt.buf)
            exit_program(1);
    }
    opkt.data = pkt->data;
    opkt.size = pkt->size;

    av_copy_packet_side_data(&opkt, pkt);

    output_packet(of, &opkt, ost, 0);
}
2077 
{   /* guess_input_channel_layout() body — the signature line is missing from
     * this extract (presumably: static int guess_input_channel_layout(
     * InputStream *ist) — TODO confirm). Returns 1 when a usable channel
     * layout is present or was guessed, 0 when guessing is not possible. */
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* refuse to guess for very high channel counts (-guess_layout_max) */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): the statement that actually guesses the layout
           (original line 2087) is missing from this extract — presumably
           av_get_default_channel_layout(dec->channels). */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2097 
/* Update decode-error statistics after a decode attempt and enforce
 * -xerror: aborts on decode failure (and on corrupt frames) when
 * exit_on_error is set. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
        /* NOTE(review): two lines (original 2107-2108) are missing from this
           extract — presumably the corrupt-frame test and the av_log(...)
           call opening for the message below. */
            "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
        }
    }
}
2115 
// Filters can be configured only if the formats of all inputs are known.
/* ifilter_has_all_input_formats() — the signature line is missing from this
 * extract (presumably: static int ifilter_has_all_input_formats(
 * FilterGraph *fg) — TODO confirm). Returns 1 when every audio/video input
 * of the graph has a known format, 0 otherwise. */
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2127 
{   /* ifilter_send_frame() body — the signature line is missing from this
     * extract (presumably: static int ifilter_send_frame(InputFilter *ifilter,
     * AVFrame *frame) — TODO confirm). Pushes a decoded frame into the
     * filtergraph, reconfiguring it first when the input parameters changed;
     * frames are queued while other inputs' formats are still unknown.
     * Returns 0 on success, a negative AVERROR otherwise. */
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* -reinit_filter 0 keeps an already-configured graph as-is */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* hardware frames context appearing/disappearing/changing forces reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* clone, since the caller keeps ownership of 'frame' */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                /* grow the queue geometrically when full */
                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* drain pending output before tearing the old graph down */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the statement assigning 'ret' here (original line 2195)
       is missing from this extract — presumably the
       av_buffersrc_add_frame_flags() call that submits 'frame'. */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2204 
2205 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2206 {
2207  int ret;
2208 
2209  ifilter->eof = 1;
2210 
2211  if (ifilter->filter) {
2212  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2213  if (ret < 0)
2214  return ret;
2215  } else {
2216  // the filtergraph was never configured
2217  if (ifilter->format < 0)
2218  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2219  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2220  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2221  return AVERROR_INVALIDDATA;
2222  }
2223  }
2224 
2225  return 0;
2226 }
2227 
2228 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2229 // There is the following difference: if you got a frame, you must call
2230 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2231 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2232 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2233 {
2234  int ret;
2235 
2236  *got_frame = 0;
2237 
2238  if (pkt) {
2239  ret = avcodec_send_packet(avctx, pkt);
2240  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2241  // decoded frames with avcodec_receive_frame() until done.
2242  if (ret < 0 && ret != AVERROR_EOF)
2243  return ret;
2244  }
2245 
2246  ret = avcodec_receive_frame(avctx, frame);
2247  if (ret < 0 && ret != AVERROR(EAGAIN))
2248  return ret;
2249  if (ret >= 0)
2250  *got_frame = 1;
2251 
2252  return 0;
2253 }
2254 
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter receive a new reference (via
 * ist->filter_frame); the last one consumes the decoded frame itself.
 * Returns 0 on success or the first negative error encountered.
 * NOTE(review): the av_log() call head before the "Failed to inject frame"
 * format string was lost in this extraction (original line 2273). */
2255 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2256 {
2257  int i, ret;
2258  AVFrame *f;
2259 
2260  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2261  for (i = 0; i < ist->nb_filters; i++) {
2262  if (i < ist->nb_filters - 1) {
2263  f = ist->filter_frame;
2264  ret = av_frame_ref(f, decoded_frame);
2265  if (ret < 0)
2266  break;
2267  } else
2268  f = decoded_frame;
2269  ret = ifilter_send_frame(ist->filters[i], f);
 /* EOF from one filter input is not fatal for the others */
2270  if (ret == AVERROR_EOF)
2271  ret = 0; /* ignore */
2272  if (ret < 0) {
2274  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2275  break;
2276  }
2277  }
2278  return ret;
2279 }
2280 
/* Decode one audio packet (or pull another pending frame when pkt==NULL),
 * update the stream's pts/dts bookkeeping and forward the decoded frame to
 * the filtergraphs. *got_output is set when a frame was produced,
 * *decode_failed when the decoder itself errored.
 * NOTE(review): original line 2295 was lost in this extraction — presumably
 * the update_benchmark(NULL) reset paired with the call below; confirm
 * against upstream. */
2281 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2282  int *decode_failed)
2283 {
2284  AVFrame *decoded_frame;
2285  AVCodecContext *avctx = ist->dec_ctx;
2286  int ret, err = 0;
2287  AVRational decoded_frame_tb;
2288 
 /* lazily allocate the per-stream scratch frames */
2289  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2290  return AVERROR(ENOMEM);
2291  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2292  return AVERROR(ENOMEM);
2293  decoded_frame = ist->decoded_frame;
2294 
2296  ret = decode(avctx, decoded_frame, got_output, pkt);
2297  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2298  if (ret < 0)
2299  *decode_failed = 1;
2300 
2301  if (ret >= 0 && avctx->sample_rate <= 0) {
2302  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2303  ret = AVERROR_INVALIDDATA;
2304  }
2305 
2306  if (ret != AVERROR_EOF)
2307  check_decode_result(ist, got_output, ret);
2308 
2309  if (!*got_output || ret < 0)
2310  return ret;
2311 
2312  ist->samples_decoded += decoded_frame->nb_samples;
2313  ist->frames_decoded++;
2314 
2315  /* increment next_dts to use for the case where the input stream does not
2316  have timestamps or there are multiple frames in the packet */
2317  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2318  avctx->sample_rate;
2319  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320  avctx->sample_rate;
2321 
 /* pick the best available pts source and remember its time base */
2322  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2323  decoded_frame_tb = ist->st->time_base;
2324  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2325  decoded_frame->pts = pkt->pts;
2326  decoded_frame_tb = ist->st->time_base;
2327  }else {
2328  decoded_frame->pts = ist->dts;
2329  decoded_frame_tb = AV_TIME_BASE_Q;
2330  }
 /* rescale to samples, tracking rounding error across calls */
2331  if (decoded_frame->pts != AV_NOPTS_VALUE)
2332  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2333  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2334  (AVRational){1, avctx->sample_rate});
2335  ist->nb_samples = decoded_frame->nb_samples;
2336  err = send_frame_to_filters(ist, decoded_frame);
2337 
2338  av_frame_unref(ist->filter_frame);
2339  av_frame_unref(decoded_frame);
2340  return err < 0 ? err : ret;
2341 }
2342 
/* Decode one video packet (or pull another pending frame when pkt==NULL),
 * derive a best-effort timestamp, update pts bookkeeping and forward the
 * frame to the filtergraphs. *duration_pts receives the frame duration in
 * stream time base; eof!=0 signals draining.
 * NOTE(review): several lines were lost in this extraction — original line
 * 2380 (presumably update_benchmark(NULL)), line 2392 (the av_log() head of
 * the video_delay warning) and line 2471 (presumably
 * av_frame_unref(ist->filter_frame)) — confirm against upstream. */
2343 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2344  int *decode_failed)
2345 {
2346  AVFrame *decoded_frame;
2347  int i, ret = 0, err = 0;
2348  int64_t best_effort_timestamp;
2349  int64_t dts = AV_NOPTS_VALUE;
2350  AVPacket avpkt;
2351 
2352  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2353  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2354  // skip the packet.
2355  if (!eof && pkt && pkt->size == 0)
2356  return 0;
2357 
2358  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2359  return AVERROR(ENOMEM);
2360  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2361  return AVERROR(ENOMEM);
2362  decoded_frame = ist->decoded_frame;
2363  if (ist->dts != AV_NOPTS_VALUE)
2364  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2365  if (pkt) {
2366  avpkt = *pkt;
2367  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2368  }
2369 
2370  // The old code used to set dts on the drain packet, which does not work
2371  // with the new API anymore.
2372  if (eof) {
 /* remember the dts so drained frames without timestamps can reuse it */
2373  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2374  if (!new)
2375  return AVERROR(ENOMEM);
2376  ist->dts_buffer = new;
2377  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2378  }
2379 
2381  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2382  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2383  if (ret < 0)
2384  *decode_failed = 1;
2385 
2386  // The following line may be required in some cases where there is no parser
2387  // or the parser does not has_b_frames correctly
2388  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2389  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2390  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2391  } else
2393  "video_delay is larger in decoder than demuxer %d > %d.\n"
2394  "If you want to help, upload a sample "
2395  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2396  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2397  ist->dec_ctx->has_b_frames,
2398  ist->st->codecpar->video_delay);
2399  }
2400 
2401  if (ret != AVERROR_EOF)
2402  check_decode_result(ist, got_output, ret);
2403 
 /* purely diagnostic: report decoder-context vs. frame parameter drift */
2404  if (*got_output && ret >= 0) {
2405  if (ist->dec_ctx->width != decoded_frame->width ||
2406  ist->dec_ctx->height != decoded_frame->height ||
2407  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2408  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2409  decoded_frame->width,
2410  decoded_frame->height,
2411  decoded_frame->format,
2412  ist->dec_ctx->width,
2413  ist->dec_ctx->height,
2414  ist->dec_ctx->pix_fmt);
2415  }
2416  }
2417 
2418  if (!*got_output || ret < 0)
2419  return ret;
2420 
2421  if(ist->top_field_first>=0)
2422  decoded_frame->top_field_first = ist->top_field_first;
2423 
2424  ist->frames_decoded++;
2425 
 /* download the frame from a hw surface to system memory when needed */
2426  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2427  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2428  if (err < 0)
2429  goto fail;
2430  }
2431  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2432 
2433  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2434  *duration_pts = decoded_frame->pkt_duration;
2435 
 /* forced CFR input: synthesize monotonically increasing timestamps */
2436  if (ist->framerate.num)
2437  best_effort_timestamp = ist->cfr_next_pts++;
2438 
2439  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2440  best_effort_timestamp = ist->dts_buffer[0];
2441 
2442  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2443  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2444  ist->nb_dts_buffer--;
2445  }
2446 
2447  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2448  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2449 
2450  if (ts != AV_NOPTS_VALUE)
2451  ist->next_pts = ist->pts = ts;
2452  }
2453 
2454  if (debug_ts) {
2455  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2456  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2457  ist->st->index, av_ts2str(decoded_frame->pts),
2458  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2459  best_effort_timestamp,
2460  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2461  decoded_frame->key_frame, decoded_frame->pict_type,
2462  ist->st->time_base.num, ist->st->time_base.den);
2463  }
2464 
2465  if (ist->st->sample_aspect_ratio.num)
2466  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2467 
2468  err = send_frame_to_filters(ist, decoded_frame);
2469 
2470 fail:
2472  av_frame_unref(decoded_frame);
2473  return err < 0 ? err : ret;
2474 }
2475 
/* Decode one subtitle packet and dispatch the result: render it into the
 * sub2video frame, queue it for a not-yet-initialized sub2video filter, and
 * encode it to every matching subtitle output stream. With -fix_sub_duration
 * the previous subtitle's display time is clipped against the new pts.
 * NOTE(review): lines lost in this extraction — original lines 2501/2503
 * (presumably the end_display_time argument and its reassignment in the
 * duration-fix branch) and line 2524 (presumably the av_fifo_realloc2()
 * growing the sub2video queue) — confirm against upstream. */
2476 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2477  int *decode_failed)
2478 {
2479  AVSubtitle subtitle;
2480  int free_sub = 1;
2481  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2482  &subtitle, got_output, pkt);
2483 
2484  check_decode_result(NULL, got_output, ret);
2485 
2486  if (ret < 0 || !*got_output) {
2487  *decode_failed = 1;
2488  if (!pkt->size)
2489  sub2video_flush(ist);
2490  return ret;
2491  }
2492 
2493  if (ist->fix_sub_duration) {
2494  int end = 1;
2495  if (ist->prev_sub.got_output) {
 /* elapsed time between the previous and current subtitle, in ms */
2496  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2497  1000, AV_TIME_BASE);
2498  if (end < ist->prev_sub.subtitle.end_display_time) {
2499  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2500  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2502  end <= 0 ? ", dropping it" : "");
2504  }
2505  }
 /* operate on the previous subtitle; stash the current one for next call */
2506  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2507  FFSWAP(int, ret, ist->prev_sub.ret);
2508  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2509  if (end <= 0)
2510  goto out;
2511  }
2512 
2513  if (!*got_output)
2514  return ret;
2515 
2516  if (ist->sub2video.frame) {
2517  sub2video_update(ist, &subtitle);
2518  } else if (ist->nb_filters) {
 /* filters exist but sub2video is not set up yet: queue the subtitle */
2519  if (!ist->sub2video.sub_queue)
2520  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2521  if (!ist->sub2video.sub_queue)
2522  exit_program(1);
2523  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2525  if (ret < 0)
2526  exit_program(1);
2527  }
2528  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2529  free_sub = 0;
2530  }
2531 
2532  if (!subtitle.num_rects)
2533  goto out;
2534 
2535  ist->frames_decoded++;
2536 
2537  for (i = 0; i < nb_output_streams; i++) {
2538  OutputStream *ost = output_streams[i];
2539 
2540  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2541  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2542  continue;
2543 
2544  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2545  }
2546 
2547 out:
2548  if (free_sub)
2549  avsubtitle_free(&subtitle);
2550  return ret;
2551 }
2552 
/* Propagate EOF (at the stream's current pts) to every filtergraph input
 * attached to this input stream.
 * NOTE(review): the signature line was lost in this extraction; from the
 * call site in process_input_packet() it is presumably
 * `static int send_filter_eof(InputStream *ist)`. The rounding-flags
 * argument line of av_rescale_q_rnd() (original line 2558) was also lost —
 * confirm against upstream. */
2554 {
2555  int i, ret;
2556  /* TODO keep pts also in stream time base to avoid converting back */
2557  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2559 
2560  for (i = 0; i < ist->nb_filters; i++) {
2561  ret = ifilter_send_eof(ist->filters[i], pts);
2562  if (ret < 0)
2563  return ret;
2564  }
2565  return 0;
2566 }
2567 
2568 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2569 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2570 {
2571  int ret = 0, i;
2572  int repeating = 0;
2573  int eof_reached = 0;
2574 
2575  AVPacket avpkt;
2576  if (!ist->saw_first_ts) {
2577  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2578  ist->pts = 0;
2579  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2580  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2581  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2582  }
2583  ist->saw_first_ts = 1;
2584  }
2585 
2586  if (ist->next_dts == AV_NOPTS_VALUE)
2587  ist->next_dts = ist->dts;
2588  if (ist->next_pts == AV_NOPTS_VALUE)
2589  ist->next_pts = ist->pts;
2590 
2591  if (!pkt) {
2592  /* EOF handling */
2593  av_init_packet(&avpkt);
2594  avpkt.data = NULL;
2595  avpkt.size = 0;
2596  } else {
2597  avpkt = *pkt;
2598  }
2599 
2600  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2601  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2602  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2603  ist->next_pts = ist->pts = ist->dts;
2604  }
2605 
2606  // while we have more to decode or while the decoder did output something on EOF
2607  while (ist->decoding_needed) {
2608  int64_t duration_dts = 0;
2609  int64_t duration_pts = 0;
2610  int got_output = 0;
2611  int decode_failed = 0;
2612 
2613  ist->pts = ist->next_pts;
2614  ist->dts = ist->next_dts;
2615 
2616  switch (ist->dec_ctx->codec_type) {
2617  case AVMEDIA_TYPE_AUDIO:
2618  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2619  &decode_failed);
2620  break;
2621  case AVMEDIA_TYPE_VIDEO:
2622  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2623  &decode_failed);
2624  if (!repeating || !pkt || got_output) {
2625  if (pkt && pkt->duration) {
2626  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2627  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2629  duration_dts = ((int64_t)AV_TIME_BASE *
2630  ist->dec_ctx->framerate.den * ticks) /
2632  }
2633 
2634  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2635  ist->next_dts += duration_dts;
2636  }else
2637  ist->next_dts = AV_NOPTS_VALUE;
2638  }
2639 
2640  if (got_output) {
2641  if (duration_pts > 0) {
2642  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2643  } else {
2644  ist->next_pts += duration_dts;
2645  }
2646  }
2647  break;
2648  case AVMEDIA_TYPE_SUBTITLE:
2649  if (repeating)
2650  break;
2651  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2652  if (!pkt && ret >= 0)
2653  ret = AVERROR_EOF;
2654  break;
2655  default:
2656  return -1;
2657  }
2658 
2659  if (ret == AVERROR_EOF) {
2660  eof_reached = 1;
2661  break;
2662  }
2663 
2664  if (ret < 0) {
2665  if (decode_failed) {
2666  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2667  ist->file_index, ist->st->index, av_err2str(ret));
2668  } else {
2669  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2670  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2671  }
2672  if (!decode_failed || exit_on_error)
2673  exit_program(1);
2674  break;
2675  }
2676 
2677  if (got_output)
2678  ist->got_output = 1;
2679 
2680  if (!got_output)
2681  break;
2682 
2683  // During draining, we might get multiple output frames in this loop.
2684  // ffmpeg.c does not drain the filter chain on configuration changes,
2685  // which means if we send multiple frames at once to the filters, and
2686  // one of those frames changes configuration, the buffered frames will
2687  // be lost. This can upset certain FATE tests.
2688  // Decode only 1 frame per call on EOF to appease these FATE tests.
2689  // The ideal solution would be to rewrite decoding to use the new
2690  // decoding API in a better way.
2691  if (!pkt)
2692  break;
2693 
2694  repeating = 1;
2695  }
2696 
2697  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2698  /* except when looping we need to flush but not to send an EOF */
2699  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2700  int ret = send_filter_eof(ist);
2701  if (ret < 0) {
2702  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2703  exit_program(1);
2704  }
2705  }
2706 
2707  /* handle stream copy */
2708  if (!ist->decoding_needed && pkt) {
2709  ist->dts = ist->next_dts;
2710  switch (ist->dec_ctx->codec_type) {
2711  case AVMEDIA_TYPE_AUDIO:
2712  av_assert1(pkt->duration >= 0);
2713  if (ist->dec_ctx->sample_rate) {
2714  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2715  ist->dec_ctx->sample_rate;
2716  } else {
2717  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2718  }
2719  break;
2720  case AVMEDIA_TYPE_VIDEO:
2721  if (ist->framerate.num) {
2722  // TODO: Remove work-around for c99-to-c89 issue 7
2723  AVRational time_base_q = AV_TIME_BASE_Q;
2724  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2725  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2726  } else if (pkt->duration) {
2727  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2728  } else if(ist->dec_ctx->framerate.num != 0) {
2729  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2730  ist->next_dts += ((int64_t)AV_TIME_BASE *
2731  ist->dec_ctx->framerate.den * ticks) /
2733  }
2734  break;
2735  }
2736  ist->pts = ist->dts;
2737  ist->next_pts = ist->next_dts;
2738  }
2739  for (i = 0; i < nb_output_streams; i++) {
2740  OutputStream *ost = output_streams[i];
2741 
2742  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2743  continue;
2744 
2745  do_streamcopy(ist, ost, pkt);
2746  }
2747 
2748  return !eof_reached;
2749 }
2750 
/* Generate and emit the SDP description for all RTP output files, either to
 * stdout or to the file given by -sdp_file. Bails out silently until every
 * output file has written its header.
 * NOTE(review): original line 2788 was lost in this extraction — presumably
 * av_freep(&sdp_filename) after the file was written; confirm upstream. */
2751 static void print_sdp(void)
2752 {
2753  char sdp[16384];
2754  int i;
2755  int j;
2756  AVIOContext *sdp_pb;
2757  AVFormatContext **avc;
2758 
 /* the SDP can only describe fully initialized outputs */
2759  for (i = 0; i < nb_output_files; i++) {
2760  if (!output_files[i]->header_written)
2761  return;
2762  }
2763 
2764  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2765  if (!avc)
2766  exit_program(1);
 /* collect only the RTP muxer contexts; j counts them */
2767  for (i = 0, j = 0; i < nb_output_files; i++) {
2768  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2769  avc[j] = output_files[i]->ctx;
2770  j++;
2771  }
2772  }
2773 
2774  if (!j)
2775  goto fail;
2776 
2777  av_sdp_create(avc, j, sdp, sizeof(sdp));
2778 
2779  if (!sdp_filename) {
2780  printf("SDP:\n%s\n", sdp);
2781  fflush(stdout);
2782  } else {
2783  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2784  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2785  } else {
2786  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2787  avio_closep(&sdp_pb);
2789  }
2790  }
2791 
2792 fail:
2793  av_freep(&avc);
2794 }
2795 
2797 {
2798  InputStream *ist = s->opaque;
2799  const enum AVPixelFormat *p;
2800  int ret;
2801 
2802  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2804  const AVCodecHWConfig *config = NULL;
2805  int i;
2806 
2807  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2808  break;
2809 
2810  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2811  ist->hwaccel_id == HWACCEL_AUTO) {
2812  for (i = 0;; i++) {
2813  config = avcodec_get_hw_config(s->codec, i);
2814  if (!config)
2815  break;
2816  if (!(config->methods &
2818  continue;
2819  if (config->pix_fmt == *p)
2820  break;
2821  }
2822  }
2823  if (config) {
2824  if (config->device_type != ist->hwaccel_device_type) {
2825  // Different hwaccel offered, ignore.
2826  continue;
2827  }
2828 
2829  ret = hwaccel_decode_init(s);
2830  if (ret < 0) {
2831  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2833  "%s hwaccel requested for input stream #%d:%d, "
2834  "but cannot be initialized.\n",
2836  ist->file_index, ist->st->index);
2837  return AV_PIX_FMT_NONE;
2838  }
2839  continue;
2840  }
2841  } else {
2842  const HWAccel *hwaccel = NULL;
2843  int i;
2844  for (i = 0; hwaccels[i].name; i++) {
2845  if (hwaccels[i].pix_fmt == *p) {
2846  hwaccel = &hwaccels[i];
2847  break;
2848  }
2849  }
2850  if (!hwaccel) {
2851  // No hwaccel supporting this pixfmt.
2852  continue;
2853  }
2854  if (hwaccel->id != ist->hwaccel_id) {
2855  // Does not match requested hwaccel.
2856  continue;
2857  }
2858 
2859  ret = hwaccel->init(s);
2860  if (ret < 0) {
2862  "%s hwaccel requested for input stream #%d:%d, "
2863  "but cannot be initialized.\n", hwaccel->name,
2864  ist->file_index, ist->st->index);
2865  return AV_PIX_FMT_NONE;
2866  }
2867  }
2868 
2869  if (ist->hw_frames_ctx) {
2871  if (!s->hw_frames_ctx)
2872  return AV_PIX_FMT_NONE;
2873  }
2874 
2875  ist->hwaccel_pix_fmt = *p;
2876  break;
2877  }
2878 
2879  return *p;
2880 }
2881 
2883 {
2884  InputStream *ist = s->opaque;
2885 
2886  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2887  return ist->hwaccel_get_buffer(s, frame, flags);
2888 
2889  return avcodec_default_get_buffer2(s, frame, flags);
2890 }
2891 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, set decoder options, set up any hw
 * device, and call avcodec_open2(). On failure a human-readable message is
 * written into `error` and a negative AVERROR code returned.
 * NOTE(review): lines lost in this extraction — original line 2914
 * (presumably the DECODING_FOR_FILTER half of the DVB-subtitle condition),
 * line 2927 (presumably the attached-pic disposition test guarding the
 * "threads"="1" override) and line 2948 (presumably
 * assert_avoptions(ist->decoder_opts)) — confirm against upstream. */
2892 static int init_input_stream(int ist_index, char *error, int error_len)
2893 {
2894  int ret;
2895  InputStream *ist = input_streams[ist_index];
2896 
2897  if (ist->decoding_needed) {
2898  AVCodec *codec = ist->dec;
2899  if (!codec) {
2900  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2901  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2902  return AVERROR(EINVAL);
2903  }
2904 
 /* hook our callbacks; opaque lets the callbacks recover the InputStream */
2905  ist->dec_ctx->opaque = ist;
2906  ist->dec_ctx->get_format = get_format;
2907  ist->dec_ctx->get_buffer2 = get_buffer;
2908  ist->dec_ctx->thread_safe_callbacks = 1;
2909 
2910  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2911  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2912  (ist->decoding_needed & DECODING_FOR_OST)) {
2913  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2915  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2916  }
2917 
2918  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2919 
2920  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2921  * audio, and video decoders such as cuvid or mediacodec */
2922  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2923 
2924  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2925  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2926  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2928  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2929 
2930  ret = hw_device_setup_for_decode(ist);
2931  if (ret < 0) {
2932  snprintf(error, error_len, "Device setup failed for "
2933  "decoder on input stream #%d:%d : %s",
2934  ist->file_index, ist->st->index, av_err2str(ret));
2935  return ret;
2936  }
2937 
2938  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2939  if (ret == AVERROR_EXPERIMENTAL)
2940  abort_codec_experimental(codec, 0);
2941 
2942  snprintf(error, error_len,
2943  "Error while opening decoder for input stream "
2944  "#%d:%d : %s",
2945  ist->file_index, ist->st->index, av_err2str(ret));
2946  return ret;
2947  }
2949  }
2950 
2951  ist->next_pts = AV_NOPTS_VALUE;
2952  ist->next_dts = AV_NOPTS_VALUE;
2953 
2954  return 0;
2955 }
2956 
/* Map an output stream back to the input stream it copies/encodes from, or
 * NULL when it has no direct source (e.g. it is fed by a filtergraph).
 * NOTE(review): the signature line was lost in this extraction; from the
 * call site below it is presumably
 * `static InputStream *get_input_stream(OutputStream *ost)` — confirm. */
2958 {
2959  if (ost->source_index >= 0)
2960  return input_streams[ost->source_index];
2961  return NULL;
2962 }
2963 
/* qsort()-style comparator for int64_t values: yields -1, 0 or 1 according
 * to the ordering of the two pointed-to values, without any risk of the
 * overflow a naive subtraction would have. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2968 
2969 /* open the muxer when all the streams are initialized */
/* Once every output stream of `of` is initialized: write the container
 * header, print the SDP if requested, and flush packets that were queued in
 * the per-stream muxing queues while the muxer was not yet open.
 * NOTE(review): the av_log() call head before the "Could not write header"
 * format string was lost in this extraction (original line 2984). */
2970 static int check_init_output_file(OutputFile *of, int file_index)
2971 {
2972  int ret, i;
2973 
2974  for (i = 0; i < of->ctx->nb_streams; i++) {
2975  OutputStream *ost = output_streams[of->ost_index + i];
2976  if (!ost->initialized)
2977  return 0;
2978  }
2979 
2980  of->ctx->interrupt_callback = int_cb;
2981 
2982  ret = avformat_write_header(of->ctx, &of->opts);
2983  if (ret < 0) {
2985  "Could not write header for output file #%d "
2986  "(incorrect codec parameters ?): %s\n",
2987  file_index, av_err2str(ret));
2988  return ret;
2989  }
2990  //assert_avoptions(of->opts);
2991  of->header_written = 1;
2992 
2993  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2994 
2995  if (sdp_filename || want_sdp)
2996  print_sdp();
2997 
2998  /* flush the muxing queues */
2999  for (i = 0; i < of->ctx->nb_streams; i++) {
3000  OutputStream *ost = output_streams[of->ost_index + i];
3001 
3002  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3003  if (!av_fifo_size(ost->muxing_queue))
3004  ost->mux_timebase = ost->st->time_base;
3005 
3006  while (av_fifo_size(ost->muxing_queue)) {
3007  AVPacket pkt;
3008  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3009  write_packet(of, &pkt, ost, 1);
3010  }
3011  }
3012 
3013  return 0;
3014 }
3015 
3017 {
3018  AVBSFContext *ctx;
3019  int i, ret;
3020 
3021  if (!ost->nb_bitstream_filters)
3022  return 0;
3023 
3024  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3025  ctx = ost->bsf_ctx[i];
3026 
3027  ret = avcodec_parameters_copy(ctx->par_in,
3028  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3029  if (ret < 0)
3030  return ret;
3031 
3032  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3033 
3034  ret = av_bsf_init(ctx);
3035  if (ret < 0) {
3036  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3037  ost->bsf_ctx[i]->filter->name);
3038  return ret;
3039  }
3040  }
3041 
3042  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3043  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3044  if (ret < 0)
3045  return ret;
3046 
3047  ost->st->time_base = ctx->time_base_out;
3048 
3049  return 0;
3050 }
3051 
3053 {
3054  OutputFile *of = output_files[ost->file_index];
3055  InputStream *ist = get_input_stream(ost);
3056  AVCodecParameters *par_dst = ost->st->codecpar;
3057  AVCodecParameters *par_src = ost->ref_par;
3058  AVRational sar;
3059  int i, ret;
3060  uint32_t codec_tag = par_dst->codec_tag;
3061 
3062  av_assert0(ist && !ost->filter);
3063 
3064  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3065  if (ret >= 0)
3066  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3067  if (ret < 0) {
3069  "Error setting up codec context options.\n");
3070  return ret;
3071  }
3072 
3073  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3074  if (ret < 0) {
3076  "Error getting reference codec parameters.\n");
3077  return ret;
3078  }
3079 
3080  if (!codec_tag) {
3081  unsigned int codec_tag_tmp;
3082  if (!of->ctx->oformat->codec_tag ||
3083  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3084  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3085  codec_tag = par_src->codec_tag;
3086  }
3087 
3088  ret = avcodec_parameters_copy(par_dst, par_src);
3089  if (ret < 0)
3090  return ret;
3091 
3092  par_dst->codec_tag = codec_tag;
3093 
3094  if (!ost->frame_rate.num)
3095  ost->frame_rate = ist->framerate;
3096  ost->st->avg_frame_rate = ost->frame_rate;
3097 
3099  if (ret < 0)
3100  return ret;
3101 
3102  // copy timebase while removing common factors
3103  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3105 
3106  // copy estimated duration as a hint to the muxer
3107  if (ost->st->duration <= 0 && ist->st->duration > 0)
3108  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3109 
3110  // copy disposition
3111  ost->st->disposition = ist->st->disposition;
3112 
3113  if (ist->st->nb_side_data) {
3114  for (i = 0; i < ist->st->nb_side_data; i++) {
3115  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3116  uint8_t *dst_data;
3117 
3118  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3119  if (!dst_data)
3120  return AVERROR(ENOMEM);
3121  memcpy(dst_data, sd_src->data, sd_src->size);
3122  }
3123  }
3124 
3125  if (ost->rotate_overridden) {
3127  sizeof(int32_t) * 9);
3128  if (sd)
3129  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3130  }
3131 
3132  switch (par_dst->codec_type) {
3133  case AVMEDIA_TYPE_AUDIO:
3134  if (audio_volume != 256) {
3135  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3136  exit_program(1);
3137  }
3138  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3139  par_dst->block_align= 0;
3140  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3141  par_dst->block_align= 0;
3142  break;
3143  case AVMEDIA_TYPE_VIDEO:
3144  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3145  sar =
3146  av_mul_q(ost->frame_aspect_ratio,
3147  (AVRational){ par_dst->height, par_dst->width });
3148  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3149  "with stream copy may produce invalid files\n");
3150  }
3151  else if (ist->st->sample_aspect_ratio.num)
3152  sar = ist->st->sample_aspect_ratio;
3153  else
3154  sar = par_src->sample_aspect_ratio;
3155  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3156  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3157  ost->st->r_frame_rate = ist->st->r_frame_rate;
3158  break;
3159  }
3160 
3161  ost->mux_timebase = ist->st->time_base;
3162 
3163  return 0;
3164 }
3165 
/* Store an "encoder" metadata tag ("Lavc<version> <codec>" or just
 * "Lavc <codec>" in bitexact mode) on the output stream, unless the user
 * already set one. Respects -fflags/-flags bitexact from either the muxer
 * or the encoder options.
 * NOTE(review): lines lost in this extraction — the signature (presumably
 * `static void set_encoder_id(OutputFile *of, OutputStream *ost)`) and
 * original line 3204 (presumably the final av_dict_set() flags argument,
 * AV_DICT_DONT_STRDUP_VAL) — confirm against upstream. */
3167 {
3168  AVDictionaryEntry *e;
3169 
3170  uint8_t *encoder_string;
3171  int encoder_string_len;
3172  int format_flags = 0;
3173  int codec_flags = ost->enc_ctx->flags;
3174 
 /* a user-provided tag always wins */
3175  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3176  return;
3177 
3178  e = av_dict_get(of->opts, "fflags", NULL, 0);
3179  if (e) {
3180  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3181  if (!o)
3182  return;
3183  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3184  }
3185  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3186  if (e) {
3187  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3188  if (!o)
3189  return;
3190  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3191  }
3192 
 /* " " separator + NUL terminator account for the +2 */
3193  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3194  encoder_string = av_mallocz(encoder_string_len);
3195  if (!encoder_string)
3196  exit_program(1);
3197 
3198  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3199  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3200  else
3201  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3202  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3203  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3205 }
3206 
/*
 * Parse a comma-separated list of forced key-frame times into
 * ost->forced_kf_pts / ost->forced_kf_count (sorted, in avctx->time_base
 * units).  A "chapters[+offset]" entry expands to one timestamp per
 * chapter of the output file, shifted by the optional offset.
 * NOTE(review): the first line of the signature (doxygen line 3207,
 * presumably "static void parse_forced_key_frames(char *kf, ...") was
 * lost in this extraction.  kf is consumed destructively: separating
 * commas are overwritten with NULs.
 */
3208  AVCodecContext *avctx)
3209 {
3210  char *p;
3211  int n = 1, i, size, index = 0;
3212  int64_t t, *pts;
3213 
 /* Entry count = number of separating commas + 1. */
3214  for (p = kf; *p; p++)
3215  if (*p == ',')
3216  n++;
3217  size = n;
3218  pts = av_malloc_array(size, sizeof(*pts));
3219  if (!pts) {
3220  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3221  exit_program(1);
3222  }
3223 
3224  p = kf;
3225  for (i = 0; i < n; i++) {
3226  char *next = strchr(p, ',');
3227 
 /* Terminate the current token and advance past the comma. */
3228  if (next)
3229  *next++ = 0;
3230 
3231  if (!memcmp(p, "chapters", 8)) {
3232 
3233  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3234  int j;
3235 
 /* Grow the array by (nb_chapters - 1): the "chapters" token itself
  * already accounted for one slot.  Guard against int overflow. */
3236  if (avf->nb_chapters > INT_MAX - size ||
3237  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3238  sizeof(*pts)))) {
 /* NOTE(review): the av_log() call opening this message (doxygen
  * line 3239) was lost in this extraction. */
3240  "Could not allocate forced key frames array.\n");
3241  exit_program(1);
3242  }
 /* Optional "+offset" suffix after "chapters". */
3243  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3244  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3245 
3246  for (j = 0; j < avf->nb_chapters; j++) {
3247  AVChapter *c = avf->chapters[j];
3248  av_assert1(index < size);
3249  pts[index++] = av_rescale_q(c->start, c->time_base,
3250  avctx->time_base) + t;
3251  }
3252 
3253  } else {
3254 
 /* Plain timestamp entry. */
3255  t = parse_time_or_die("force_key_frames", p, 1);
3256  av_assert1(index < size);
3257  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3258 
3259  }
3260 
3261  p = next;
3262  }
3263 
 /* Keep the list sorted so the encoding loop can scan it linearly. */
3264  av_assert0(index == size);
3265  qsort(pts, size, sizeof(*pts), compare_int64);
3266  ost->forced_kf_count = size;
3267  ost->forced_kf_pts = pts;
3268 }
3269 
3270 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3271 {
3272  InputStream *ist = get_input_stream(ost);
3273  AVCodecContext *enc_ctx = ost->enc_ctx;
3274  AVFormatContext *oc;
3275 
3276  if (ost->enc_timebase.num > 0) {
3277  enc_ctx->time_base = ost->enc_timebase;
3278  return;
3279  }
3280 
3281  if (ost->enc_timebase.num < 0) {
3282  if (ist) {
3283  enc_ctx->time_base = ist->st->time_base;
3284  return;
3285  }
3286 
3287  oc = output_files[ost->file_index]->ctx;
3288  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3289  }
3290 
3291  enc_ctx->time_base = default_time_base;
3292 }
3293 
/*
 * Configure the encoder context of an output stream that will be encoded:
 * frame rate selection, time base, dimensions and pixel/sample parameters
 * from the filter graph sink, and forced-key-frame setup.
 * NOTE(review): this extraction lost the signature line (doxygen 3294,
 * presumably "static int init_output_stream_encode(OutputStream *ost)")
 * as well as several interior lines (3298, 3323-3324, 3329, 3336, 3357,
 * 3361-3363, 3369, 3372, 3374, 3380, 3388, 3403, 3414-3417, 3421-3424,
 * 3429); the comments below hedge around those gaps.
 */
3295 {
3296  InputStream *ist = get_input_stream(ost);
3297  AVCodecContext *enc_ctx = ost->enc_ctx;
3299  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3300  int j, ret;
3301 
3302  set_encoder_id(output_files[ost->file_index], ost);
3303 
3304  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3305  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3306  // which have to be filtered out to prevent leaking them to output files.
3307  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3308 
3309  if (ist) {
 /* Inherit disposition and chroma sample location from the input. */
3310  ost->st->disposition = ist->st->disposition;
3311 
3312  dec_ctx = ist->dec_ctx;
3313 
3314  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3315  } else {
 /* No input stream: check whether this is the only stream of its type
  * in the output file (the action taken on the elided lines 3323-3324
  * presumably sets a default disposition — confirm against upstream). */
3316  for (j = 0; j < oc->nb_streams; j++) {
3317  AVStream *st = oc->streams[j];
3318  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3319  break;
3320  }
3321  if (j == oc->nb_streams)
3322  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3325  }
3326 
 /* Video frame-rate selection: value from the elided line 3329, then the
  * input's -r, then r_frame_rate, then a 25 fps last resort. */
3327  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3328  if (!ost->frame_rate.num)
3330  if (ist && !ost->frame_rate.num)
3331  ost->frame_rate = ist->framerate;
3332  if (ist && !ost->frame_rate.num)
3333  ost->frame_rate = ist->st->r_frame_rate;
3334  if (ist && !ost->frame_rate.num) {
3335  ost->frame_rate = (AVRational){25, 1};
3337  "No information "
3338  "about the input framerate is available. Falling "
3339  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3340  "if you want a different framerate.\n",
3341  ost->file_index, ost->index);
3342  }
3343 
 /* Snap to the nearest supported frame rate unless -force_fps was given. */
3344  if (ost->enc->supported_framerates && !ost->force_fps) {
3345  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3346  ost->frame_rate = ost->enc->supported_framerates[idx];
3347  }
3348  // reduce frame rate for mpeg4 to be within the spec limits
3349  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3350  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3351  ost->frame_rate.num, ost->frame_rate.den, 65535);
3352  }
3353  }
3354 
3355  switch (enc_ctx->codec_type) {
3356  case AVMEDIA_TYPE_AUDIO:
 /* Clamp bits_per_raw_sample to what the chosen sample format can hold. */
3358  if (dec_ctx)
3359  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3360  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3364 
 /* Audio encoder time base defaults to 1/sample_rate. */
3365  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3366  break;
3367 
3368  case AVMEDIA_TYPE_VIDEO:
3370 
 /* If still unset, derive the time base (derivation on the elided line
  * 3372); warn on extremely fine time bases with non-passthrough vsync. */
3371  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3373  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3375  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3376  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3377  }
 /* Rescale the already-parsed forced key frames into the encoder time base. */
3378  for (j = 0; j < ost->forced_kf_count; j++)
3379  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3381  enc_ctx->time_base);
3382 
 /* Geometry, SAR and pixel format come from the filter graph sink;
  * -aspect overrides the sample aspect ratio. */
3383  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3384  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3385  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3386  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3387  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3389 
3390  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3391  if (dec_ctx)
3392  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3393  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3394 
3395  enc_ctx->framerate = ost->frame_rate;
3396 
3397  ost->st->avg_frame_rate = ost->frame_rate;
3398 
 /* Geometry or format differs from the decoded input (the action on the
  * elided line 3403 was lost in this extraction). */
3399  if (!dec_ctx ||
3400  enc_ctx->width != dec_ctx->width ||
3401  enc_ctx->height != dec_ctx->height ||
3402  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3404  }
3405 
 /* -top CLI option: 0 = bottom field first, 1 = top field first. */
3406  if (ost->top_field_first == 0) {
3407  enc_ctx->field_order = AV_FIELD_BB;
3408  } else if (ost->top_field_first == 1) {
3409  enc_ctx->field_order = AV_FIELD_TT;
3410  }
3411 
3412  if (ost->forced_keyframes) {
3413  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
 /* Expression-based forced key frames; the parse call that sets ret
  * (elided lines 3414-3415) was lost in this extraction. */
3416  if (ret < 0) {
3418  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3419  return ret;
3420  }
3425 
3426  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3427  // parse it only for static kf timings
3428  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
 /* Static timestamp list (the call on the elided line 3429 was lost
  * in this extraction; presumably parse_forced_key_frames). */
3430  }
3431  }
3432  break;
3433  case AVMEDIA_TYPE_SUBTITLE:
3434  enc_ctx->time_base = AV_TIME_BASE_Q;
 /* Default the canvas size to the input stream's dimensions. */
3435  if (!enc_ctx->width) {
3436  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3437  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3438  }
3439  break;
3440  case AVMEDIA_TYPE_DATA:
3441  break;
3442  default:
 /* Unknown media type here is a programming error. */
3443  abort();
3444  break;
3445  }
3446 
3447  ost->mux_timebase = enc_ctx->time_base;
3448 
3449  return 0;
3450 }
3451 
/*
 * Fully initialize one output stream: for encoded streams, run the encode
 * setup, open the encoder and propagate side data / time base / duration;
 * for streamcopy, run the copy setup (call elided at doxygen line 3592).
 * Afterwards apply a user -disposition override, set up bitstream filters,
 * and try to write the output file header once all its streams are ready.
 * Returns 0 or a negative AVERROR code, filling `error` on failure.
 * NOTE(review): this extraction lost several interior lines (doxygen 3469,
 * 3473, 3484-3486, 3501, 3503, 3527, 3529, 3535, 3537, 3592); comments
 * below hedge around those gaps.
 */
3452 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3453 {
3454  int ret = 0;
3455 
3456  if (ost->encoding_needed) {
3457  AVCodec *codec = ost->enc;
3458  AVCodecContext *dec = NULL;
3459  InputStream *ist;
3460 
3461  ret = init_output_stream_encode(ost);
3462  if (ret < 0)
3463  return ret;
3464 
3465  if ((ist = get_input_stream(ost)))
3466  dec = ist->dec_ctx;
 /* Pass the decoder's ASS subtitle header through to the encoder (the
  * allocation on the elided line 3469 was lost in this extraction). */
3467  if (dec && dec->subtitle_header) {
3468  /* ASS code assumes this buffer is null terminated so add extra byte. */
3470  if (!ost->enc_ctx->subtitle_header)
3471  return AVERROR(ENOMEM);
3472  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3474  }
 /* Default to automatic threading unless the user chose otherwise. */
3475  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3476  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* Audio encoders without built-in defaults get 128 kb/s if no bitrate
  * was requested. */
3477  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3478  !codec->defaults &&
3479  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3480  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3481  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3482 
 /* Hardware frames from the filter sink (the condition tail and hw
  * frames ctx assignment on the elided lines 3484-3486 were lost in
  * this extraction), otherwise regular device setup. */
3483  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3487  if (!ost->enc_ctx->hw_frames_ctx)
3488  return AVERROR(ENOMEM);
3489  } else {
3490  ret = hw_device_setup_for_encode(ost);
3491  if (ret < 0) {
3492  snprintf(error, error_len, "Device setup failed for "
3493  "encoder on output stream #%d:%d : %s",
3494  ost->file_index, ost->index, av_err2str(ret));
3495  return ret;
3496  }
3497  }
 /* Subtitle transcoding sanity check: only text->text or bitmap->bitmap
  * is supported (the descriptor lookups on the elided lines 3501 and
  * 3503 were lost in this extraction). */
3498  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3499  int input_props = 0, output_props = 0;
3500  AVCodecDescriptor const *input_descriptor =
3502  AVCodecDescriptor const *output_descriptor =
3504  if (input_descriptor)
3505  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3506  if (output_descriptor)
3507  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3508  if (input_props && output_props && input_props != output_props) {
3509  snprintf(error, error_len,
3510  "Subtitle encoding currently only possible from text to text "
3511  "or bitmap to bitmap");
3512  return AVERROR_INVALIDDATA;
3513  }
3514  }
3515 
3516  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3517  if (ret == AVERROR_EXPERIMENTAL)
3518  abort_codec_experimental(codec, 1);
3519  snprintf(error, error_len,
3520  "Error while opening encoder for output stream #%d:%d - "
3521  "maybe incorrect parameters such as bit_rate, rate, width or height",
3522  ost->file_index, ost->index);
3523  return ret;
3524  }
 /* Fixed-frame-size audio encoders (the action on the elided line 3527
  * was lost in this extraction; the frame_size is consumed there). */
3525  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3526  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3528  ost->enc_ctx->frame_size);
 /* Catch the classic kbit/s-vs-bit/s mixup. */
3530  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3531  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3532  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3533  " It takes bits/s as argument, not kbits/s\n");
3534 
 /* Copy encoder parameters to the stream (the call setting ret on the
  * elided line 3535 and the av_log opener on 3537 were lost in this
  * extraction). */
3536  if (ret < 0) {
3538  "Error initializing the output stream codec context.\n");
3539  exit_program(1);
3540  }
3541  /*
3542  * FIXME: ost->st->codec should't be needed here anymore.
3543  */
3544  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3545  if (ret < 0)
3546  return ret;
3547 
 /* Mirror coded side data produced by the encoder onto the stream. */
3548  if (ost->enc_ctx->nb_coded_side_data) {
3549  int i;
3550 
3551  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3552  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3553  uint8_t *dst_data;
3554 
3555  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3556  if (!dst_data)
3557  return AVERROR(ENOMEM);
3558  memcpy(dst_data, sd_src->data, sd_src->size);
3559  }
3560  }
3561 
3562  /*
3563  * Add global input side data. For now this is naive, and copies it
3564  * from the input stream's global side data. All side data should
3565  * really be funneled over AVFrame and libavfilter, then added back to
3566  * packet side data, and then potentially using the first packet for
3567  * global side data.
3568  */
3569  if (ist) {
3570  int i;
3571  for (i = 0; i < ist->st->nb_side_data; i++) {
3572  AVPacketSideData *sd = &ist->st->side_data[i];
3573  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3574  if (!dst)
3575  return AVERROR(ENOMEM);
3576  memcpy(dst, sd->data, sd->size);
 /* autorotate handled the rotation in the filter chain, so zero the
  * display matrix to avoid double rotation downstream. */
3577  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3578  av_display_rotation_set((uint32_t *)dst, 0);
3579  }
3580  }
3581 
3582  // copy timebase while removing common factors
3583  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3584  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3585 
3586  // copy estimated duration as a hint to the muxer
3587  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3588  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3589 
3590  ost->st->codec->codec= ost->enc_ctx->codec;
 /* Streamcopy path (the call setting ret on the elided line 3592 was
  * lost in this extraction; presumably the streamcopy init helper). */
3591  } else if (ost->stream_copy) {
3593  if (ret < 0)
3594  return ret;
3595  }
3596 
3597  // parse user provided disposition, and update stream values
3598  if (ost->disposition) {
3599  static const AVOption opts[] = {
3600  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3601  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3602  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3603  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3604  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3605  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3606  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3607  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3608  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3609  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3610  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3611  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3612  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3613  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3614  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3615  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3616  { NULL },
3617  };
 /* Minimal AVClass so av_opt_eval_flags can resolve the named flags. */
3618  static const AVClass class = {
3619  .class_name = "",
3620  .item_name = av_default_item_name,
3621  .option = opts,
3622  .version = LIBAVUTIL_VERSION_INT,
3623  };
3624  const AVClass *pclass = &class;
3625 
3626  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3627  if (ret < 0)
3628  return ret;
3629  }
3630 
3631  /* initialize bitstream filters for the output stream
3632  * needs to be done here, because the codec id for streamcopy is not
3633  * known until now */
3634  ret = init_output_bsfs(ost);
3635  if (ret < 0)
3636  return ret;
3637 
3638  ost->initialized = 1;
3639 
 /* Write the file header if all streams of this output are now ready. */
3640  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3641  if (ret < 0)
3642  return ret;
3643 
3644  return ret;
3645 }
3646 
/*
 * Warn (once per stream index) when a packet arrives for an input stream
 * that appeared after processing started; such streams are not mapped.
 * NOTE(review): the media-type argument line of the av_log() call (doxygen
 * line 3656) was lost in this extraction.
 */
3647 static void report_new_stream(int input_index, AVPacket *pkt)
3648 {
3649  InputFile *file = input_files[input_index];
3650  AVStream *st = file->ctx->streams[pkt->stream_index];
3651 
 /* Already warned about this stream index (or a later one) — stay quiet. */
3652  if (pkt->stream_index < file->nb_streams_warn)
3653  return;
3654  av_log(file->ctx, AV_LOG_WARNING,
3655  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3657  input_index, pkt->stream_index,
3658  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
 /* Remember the highest stream index reported so far. */
3659  file->nb_streams_warn = pkt->stream_index + 1;
3660 }
3661 
/*
 * One-time setup before the main transcoding loop: bind simple-filtergraph
 * outputs to their source streams, arm framerate emulation, open decoders
 * and all encoders not fed from filtergraphs, discard unused programs,
 * write headers for stream-less outputs, then dump the stream mapping.
 * Returns 0 on success or a negative error code (logging `error`).
 * NOTE(review): one line near the end (doxygen 3829, between "if (ret)"
 * and "return 0") was lost in this extraction.
 */
3662 static int transcode_init(void)
3663 {
3664  int ret = 0, i, j, k;
3665  AVFormatContext *oc;
3666  OutputStream *ost;
3667  InputStream *ist;
3668  char error[1024] = {0};
3669 
 /* For single-input filtergraphs, point each attached output stream at
  * the input stream feeding the graph (for reporting/sync purposes). */
3670  for (i = 0; i < nb_filtergraphs; i++) {
3671  FilterGraph *fg = filtergraphs[i];
3672  for (j = 0; j < fg->nb_outputs; j++) {
3673  OutputFilter *ofilter = fg->outputs[j];
3674  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3675  continue;
3676  if (fg->nb_inputs != 1)
3677  continue;
3678  for (k = nb_input_streams-1; k >= 0 ; k--)
3679  if (fg->inputs[0]->ist == input_streams[k])
3680  break;
3681  ofilter->ost->source_index = k;
3682  }
3683  }
3684 
3685  /* init framerate emulation */
3686  for (i = 0; i < nb_input_files; i++) {
3687  InputFile *ifile = input_files[i];
3688  if (ifile->rate_emu)
3689  for (j = 0; j < ifile->nb_streams; j++)
3690  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3691  }
3692 
3693  /* init input streams */
3694  for (i = 0; i < nb_input_streams; i++)
3695  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* On failure, close any encoder contexts before reporting. */
3696  for (i = 0; i < nb_output_streams; i++) {
3697  ost = output_streams[i];
3698  avcodec_close(ost->enc_ctx);
3699  }
3700  goto dump_format;
3701  }
3702 
3703  /* open each encoder */
3704  for (i = 0; i < nb_output_streams; i++) {
3705  // skip streams fed from filtergraphs until we have a frame for them
3706  if (output_streams[i]->filter)
3707  continue;
3708 
3709  ret = init_output_stream(output_streams[i], error, sizeof(error));
3710  if (ret < 0)
3711  goto dump_format;
3712  }
3713 
3714  /* discard unused programs */
3715  for (i = 0; i < nb_input_files; i++) {
3716  InputFile *ifile = input_files[i];
3717  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3718  AVProgram *p = ifile->ctx->programs[j];
3719  int discard = AVDISCARD_ALL;
3720 
 /* Keep a program if any of its streams is actually used. */
3721  for (k = 0; k < p->nb_stream_indexes; k++)
3722  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3723  discard = AVDISCARD_DEFAULT;
3724  break;
3725  }
3726  p->discard = discard;
3727  }
3728  }
3729 
3730  /* write headers for files with no streams */
3731  for (i = 0; i < nb_output_files; i++) {
3732  oc = output_files[i]->ctx;
3733  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3734  ret = check_init_output_file(output_files[i], i);
3735  if (ret < 0)
3736  goto dump_format;
3737  }
3738  }
3739 
3740  dump_format:
3741  /* dump the stream mapping */
3742  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
 /* Input streams feeding complex filtergraphs. */
3743  for (i = 0; i < nb_input_streams; i++) {
3744  ist = input_streams[i];
3745 
3746  for (j = 0; j < ist->nb_filters; j++) {
3747  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3748  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3749  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3750  ist->filters[j]->name);
3751  if (nb_filtergraphs > 1)
3752  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3753  av_log(NULL, AV_LOG_INFO, "\n");
3754  }
3755  }
3756  }
3757 
3758  for (i = 0; i < nb_output_streams; i++) {
3759  ost = output_streams[i];
3760 
3761  if (ost->attachment_filename) {
3762  /* an attached file */
3763  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3764  ost->attachment_filename, ost->file_index, ost->index);
3765  continue;
3766  }
3767 
3768  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3769  /* output from a complex graph */
3770  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3771  if (nb_filtergraphs > 1)
3772  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3773 
3774  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3775  ost->index, ost->enc ? ost->enc->name : "?");
3776  continue;
3777  }
3778 
3779  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3780  input_streams[ost->source_index]->file_index,
3781  input_streams[ost->source_index]->st->index,
3782  ost->file_index,
3783  ost->index);
3784  if (ost->sync_ist != input_streams[ost->source_index])
3785  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3786  ost->sync_ist->file_index,
3787  ost->sync_ist->st->index);
3788  if (ost->stream_copy)
3789  av_log(NULL, AV_LOG_INFO, " (copy)");
3790  else {
 /* Print "(codec (implementation))" pairs; an implementation whose
  * name matches the codec descriptor is reported as "native". */
3791  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3792  const AVCodec *out_codec = ost->enc;
3793  const char *decoder_name = "?";
3794  const char *in_codec_name = "?";
3795  const char *encoder_name = "?";
3796  const char *out_codec_name = "?";
3797  const AVCodecDescriptor *desc;
3798 
3799  if (in_codec) {
3800  decoder_name = in_codec->name;
3801  desc = avcodec_descriptor_get(in_codec->id);
3802  if (desc)
3803  in_codec_name = desc->name;
3804  if (!strcmp(decoder_name, in_codec_name))
3805  decoder_name = "native";
3806  }
3807 
3808  if (out_codec) {
3809  encoder_name = out_codec->name;
3810  desc = avcodec_descriptor_get(out_codec->id);
3811  if (desc)
3812  out_codec_name = desc->name;
3813  if (!strcmp(encoder_name, out_codec_name))
3814  encoder_name = "native";
3815  }
3816 
3817  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3818  in_codec_name, decoder_name,
3819  out_codec_name, encoder_name);
3820  }
3821  av_log(NULL, AV_LOG_INFO, "\n");
3822  }
3823 
3824  if (ret) {
3825  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3826  return ret;
3827  }
3828 
 /* NOTE(review): doxygen line 3829 was lost in this extraction —
  * presumably a completion signal for other threads; confirm upstream. */
3830 
3831  return 0;
3832 }
3833 
3834 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3835 static int need_output(void)
3836 {
3837  int i;
3838 
3839  for (i = 0; i < nb_output_streams; i++) {
3840  OutputStream *ost = output_streams[i];
3841  OutputFile *of = output_files[ost->file_index];
3842  AVFormatContext *os = output_files[ost->file_index]->ctx;
3843 
3844  if (ost->finished ||
3845  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3846  continue;
3847  if (ost->frame_number >= ost->max_frames) {
3848  int j;
3849  for (j = 0; j < of->ctx->nb_streams; j++)
3850  close_output_stream(output_streams[of->ost_index + j]);
3851  continue;
3852  }
3853 
3854  return 1;
3855  }
3856 
3857  return 0;
3858 }
3859 
3860 /**
3861  * Select the output stream to process.
3862  *
3863  * @return selected output stream, or NULL if none available
3864  */
 /* NOTE(review): the signature line (doxygen 3865) was lost in this
  * extraction; from the body it returns an OutputStream* and takes no
  * arguments.  Selection policy: any stream not yet initialized (and with
  * inputs pending) is returned first; otherwise the unfinished stream with
  * the smallest muxed DTS, unless it is temporarily unavailable. */
3866 {
3867  int i;
3868  int64_t opts_min = INT64_MAX;
3869  OutputStream *ost_min = NULL;
3870 
3871  for (i = 0; i < nb_output_streams; i++) {
3872  OutputStream *ost = output_streams[i];
 /* Rescale the stream's current DTS to microseconds for comparison;
  * an unset DTS sorts first (INT64_MIN). */
3873  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3874  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3875  AV_TIME_BASE_Q);
3876  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3877  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3878 
3879  if (!ost->initialized && !ost->inputs_done)
3880  return ost;
3881 
3882  if (!ost->finished && opts < opts_min) {
3883  opts_min = opts;
3884  ost_min = ost->unavailable ? NULL : ost;
3885  }
3886  }
3887  return ost_min;
3888 }
3889 
/* Enable (on != 0) or disable terminal echo on stdin, where termios exists. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios t;

    /* Not a tty (or no permission): silently do nothing. */
    if (tcgetattr(0, &t) != 0)
        return;

    if (on)
        t.c_lflag |= ECHO;
    else
        t.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &t);
#endif
}
3901 
/*
 * Poll for interactive keyboard commands at most every 100 ms:
 *   q quit, +/- verbosity, s QP histogram, h packet/hex dump cycling,
 *   c/C send or queue filtergraph commands, d/D set debug flags,
 *   ? help.  Returns AVERROR_EXIT to request termination, 0 otherwise.
 * NOTE(review): two lines were lost in this extraction (doxygen 3926,
 * presumably a dump-state message, and 3963, the av_log opener of the
 * parse-error message).
 */
3902 static int check_keyboard_interaction(int64_t cur_time)
3903 {
3904  int i, ret, key;
3905  static int64_t last_time;
3906  if (received_nb_signals)
3907  return AVERROR_EXIT;
3908  /* read_key() returns 0 on EOF */
 /* Throttle terminal polling to once per 100 ms; never in daemon mode. */
3909  if(cur_time - last_time >= 100000 && !run_as_daemon){
3910  key = read_key();
3911  last_time = cur_time;
3912  }else
3913  key = -1;
3914  if (key == 'q')
3915  return AVERROR_EXIT;
3916  if (key == '+') av_log_set_level(av_log_get_level()+10);
3917  if (key == '-') av_log_set_level(av_log_get_level()-10);
3918  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: nothing -> packet dump -> packet+hex dump -> nothing. */
3919  if (key == 'h'){
3920  if (do_hex_dump){
3921  do_hex_dump = do_pkt_dump = 0;
3922  } else if(do_pkt_dump){
3923  do_hex_dump = 1;
3924  } else
3925  do_pkt_dump = 1;
3927  }
 /* 'c'/'C': read "<target> <time> <command> [<arg>]" from the terminal and
  * send ('c', first matching filter) or queue ('C') it to filtergraphs. */
3928  if (key == 'c' || key == 'C'){
3929  char buf[4096], target[64], command[256], arg[256] = {0};
3930  double time;
3931  int k, n = 0;
3932  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3933  i = 0;
3934  set_tty_echo(1);
3935  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3936  if (k > 0)
3937  buf[i++] = k;
3938  buf[i] = 0;
3939  set_tty_echo(0);
3940  fprintf(stderr, "\n");
3941  if (k > 0 &&
3942  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3943  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3944  target, time, command, arg);
3945  for (i = 0; i < nb_filtergraphs; i++) {
3946  FilterGraph *fg = filtergraphs[i];
3947  if (fg->graph) {
3948  if (time < 0) {
3949  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3950  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3951  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3952  } else if (key == 'c') {
3953  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3954  ret = AVERROR_PATCHWELCOME;
3955  } else {
3956  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3957  if (ret < 0)
3958  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3959  }
3960  }
3961  }
3962  } else {
 /* NOTE(review): the av_log() call opening this message (doxygen line
  * 3963) was lost in this extraction. */
3964  "Parse error, at least 3 arguments were expected, "
3965  "only %d given in string '%s'\n", n, buf);
3966  }
3967  }
 /* 'D' cycles through supported debug bit values; 'd' prompts for one. */
3968  if (key == 'd' || key == 'D'){
3969  int debug=0;
3970  if(key == 'D') {
3971  debug = input_streams[0]->st->codec->debug<<1;
3972  if(!debug) debug = 1;
3973  while(debug & (FF_DEBUG_DCT_COEFF
3974 #if FF_API_DEBUG_MV
3975  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3976 #endif
3977  )) //unsupported, would just crash
3978  debug += debug;
3979  }else{
3980  char buf[32];
3981  int k = 0;
3982  i = 0;
3983  set_tty_echo(1);
3984  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3985  if (k > 0)
3986  buf[i++] = k;
3987  buf[i] = 0;
3988  set_tty_echo(0);
3989  fprintf(stderr, "\n");
3990  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3991  fprintf(stderr,"error parsing debug value\n");
3992  }
 /* Apply the chosen debug flags to every decoder and encoder context. */
3993  for(i=0;i<nb_input_streams;i++) {
3994  input_streams[i]->st->codec->debug = debug;
3995  }
3996  for(i=0;i<nb_output_streams;i++) {
3997  OutputStream *ost = output_streams[i];
3998  ost->enc_ctx->debug = debug;
3999  }
4000  if(debug) av_log_set_level(AV_LOG_DEBUG);
4001  fprintf(stderr,"debug=%d\n", debug);
4002  }
4003  if (key == '?'){
4004  fprintf(stderr, "key function\n"
4005  "? show this help\n"
4006  "+ increase verbosity\n"
4007  "- decrease verbosity\n"
4008  "c Send command to first matching filter supporting it\n"
4009  "C Send/Queue command to all matching filters\n"
4010  "D cycle through available debug modes\n"
4011  "h dump packets/hex press to cycle through the 3 states\n"
4012  "q quit\n"
4013  "s Show QP histogram\n"
4014  );
4015  }
4016  return 0;
4017 }
4018 
4019 #if HAVE_THREADS
/*
 * Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.  On a send
 * failure or demuxer EOF/error the error is propagated to the receiver and
 * the thread exits.
 * NOTE(review): the av_log() call opening the "queue blocking" warning
 * (doxygen line 4042) was lost in this extraction.
 */
4020 static void *input_thread(void *arg)
4021 {
4022  InputFile *f = arg;
 /* Start non-blocking if requested; dropped to blocking after the first
  * EAGAIN so we never busy-spin on a full queue. */
4023  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4024  int ret = 0;
4025 
4026  while (1) {
4027  AVPacket pkt;
4028  ret = av_read_frame(f->ctx, &pkt);
4029 
4030  if (ret == AVERROR(EAGAIN)) {
4031  av_usleep(10000);
4032  continue;
4033  }
4034  if (ret < 0) {
 /* Demuxer EOF or error: make the receiver see it, then stop. */
4035  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4036  break;
4037  }
4038  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4039  if (flags && ret == AVERROR(EAGAIN)) {
 /* Queue full in non-blocking mode: retry once, blocking, and warn. */
4040  flags = 0;
4041  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4043  "Thread message queue blocking; consider raising the "
4044  "thread_queue_size option (current value: %d)\n",
4045  f->thread_queue_size);
4046  }
4047  if (ret < 0) {
4048  if (ret != AVERROR_EOF)
4049  av_log(f->ctx, AV_LOG_ERROR,
4050  "Unable to send packet to main thread: %s\n",
4051  av_err2str(ret));
 /* The packet was not consumed by the queue — release it here. */
4052  av_packet_unref(&pkt);
4053  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4054  break;
4055  }
4056  }
4057 
4058  return NULL;
4059 }
4060 
/*
 * Stop and reap the reader thread of input file i: drain any packets still
 * queued, join the thread and free the message queue.  No-op if the file
 * has no thread queue.
 * NOTE(review): one line (doxygen 4068) was lost in this extraction —
 * presumably it signals the sender side so the thread stops blocking;
 * confirm against upstream.
 */
4061 static void free_input_thread(int i)
4062 {
4063  InputFile *f = input_files[i];
4064  AVPacket pkt;
4065 
4066  if (!f || !f->in_thread_queue)
4067  return;
 /* Drain and release every packet still sitting in the queue. */
4069  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4070  av_packet_unref(&pkt);
4071 
4072  pthread_join(f->thread, NULL);
4073  f->joined = 1;
4074  av_thread_message_queue_free(&f->in_thread_queue);
4075 }
4076 
4077 static void free_input_threads(void)
4078 {
4079  int i;
4080 
4081  for (i = 0; i < nb_input_files; i++)
4082  free_input_thread(i);
4083 }
4084 
4085 static int init_input_thread(int i)
4086 {
4087  int ret;
4088  InputFile *f = input_files[i];
4089 
4090  if (nb_input_files == 1)
4091  return 0;
4092 
4093  if (f->ctx->pb ? !f->ctx->pb->seekable :
4094  strcmp(f->ctx->iformat->name, "lavfi"))
4095  f->non_blocking = 1;
4096  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4097  f->thread_queue_size, sizeof(AVPacket));
4098  if (ret < 0)
4099  return ret;
4100 
4101  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4102  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4103  av_thread_message_queue_free(&f->in_thread_queue);
4104  return AVERROR(ret);
4105  }
4106 
4107  return 0;
4108 }
4109 
4110 static int init_input_threads(void)
4111 {
4112  int i, ret;
4113 
4114  for (i = 0; i < nb_input_files; i++) {
4115  ret = init_input_thread(i);
4116  if (ret < 0)
4117  return ret;
4118  }
4119  return 0;
4120 }
4121 
/*
 * Pull the next packet for this input file from its reader-thread queue.
 * NOTE(review): the second line of the ternary (doxygen 4126, presumably
 * the non-blocking flag versus 0) was lost in this extraction.
 */
4122 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4123 {
4124  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4125  f->non_blocking ?
4127 }
4128 #endif
4129 
/*
 * Fetch the next packet of an input file, honouring -re rate emulation and
 * delegating to the per-file reader thread when several inputs are open.
 * NOTE(review): the signature line (doxygen 4130) was lost in this
 * extraction; from the body it takes (InputFile *f, AVPacket *pkt).
 */
4131 {
 /* Rate emulation (-re): refuse to deliver a packet whose rescaled DTS is
  * still in the future relative to the stream's start wallclock time. */
4132  if (f->rate_emu) {
4133  int i;
4134  for (i = 0; i < f->nb_streams; i++) {
4135  InputStream *ist = input_streams[f->ist_index + i];
4136  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4137  int64_t now = av_gettime_relative() - ist->start;
4138  if (pts > now)
4139  return AVERROR(EAGAIN);
4140  }
4141  }
4142 
4143 #if HAVE_THREADS
 /* Multiple inputs: packets come through the reader-thread queue. */
4144  if (nb_input_files > 1)
4145  return get_input_packet_mt(f, pkt);
4146 #endif
4147  return av_read_frame(f->ctx, pkt);
4148 }
4149 
4150 static int got_eagain(void)
4151 {
4152  int i;
4153  for (i = 0; i < nb_output_streams; i++)
4154  if (output_streams[i]->unavailable)
4155  return 1;
4156  return 0;
4157 }
4158 
4159 static void reset_eagain(void)
4160 {
4161  int i;
4162  for (i = 0; i < nb_input_files; i++)
4163  input_files[i]->eagain = 0;
4164  for (i = 0; i < nb_output_streams; i++)
4165  output_streams[i]->unavailable = 0;
4166 }
4167 
4168 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4169 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4170  AVRational time_base)
4171 {
4172  int ret;
4173 
4174  if (!*duration) {
4175  *duration = tmp;
4176  return tmp_time_base;
4177  }
4178 
4179  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4180  if (ret < 0) {
4181  *duration = tmp;
4182  return tmp_time_base;
4183  }
4184 
4185  return time_base;
4186 }
4187 
/*
 * Rewind an input file for -stream_loop: seek back to the start and fold
 * the duration consumed so far into ifile->duration / ifile->time_base so
 * that timestamps of the next iteration can be offset correctly.
 * NOTE(review): the signature line (doxygen 4188) was lost in this
 * extraction; from the body it takes (InputFile *ifile, AVFormatContext *is)
 * and returns the av_seek_frame() result.
 */
4189 {
4190  InputStream *ist;
4191  AVCodecContext *avctx;
4192  int i, ret, has_audio = 0;
4193  int64_t duration = 0;
4194 
4195  ret = av_seek_frame(is, -1, is->start_time, 0);
4196  if (ret < 0)
4197  return ret;
4198 
 /* First pass: detect whether any audio stream produced samples. */
4199  for (i = 0; i < ifile->nb_streams; i++) {
4200  ist = input_streams[ifile->ist_index + i];
4201  avctx = ist->dec_ctx;
4202 
4203  /* duration is the length of the last frame in a stream
4204  * when audio stream is present we don't care about
4205  * last video frame length because it's not defined exactly */
4206  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4207  has_audio = 1;
4208  }
4209 
 /* Second pass: estimate the last-frame duration per stream and keep the
  * maximum total duration across streams. */
4210  for (i = 0; i < ifile->nb_streams; i++) {
4211  ist = input_streams[ifile->ist_index + i];
4212  avctx = ist->dec_ctx;
4213 
4214  if (has_audio) {
 /* Audio: exact — derived from the decoded sample count. */
4215  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4216  AVRational sample_rate = {1, avctx->sample_rate};
4217 
4218  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4219  } else {
4220  continue;
4221  }
4222  } else {
 /* Video: approximate one frame from -r, avg_frame_rate, or 1 tick. */
4223  if (ist->framerate.num) {
4224  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4225  } else if (ist->st->avg_frame_rate.num) {
4226  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4227  } else {
4228  duration = 1;
4229  }
4230  }
4231  if (!ifile->duration)
4232  ifile->time_base = ist->st->time_base;
4233  /* the total duration of the stream, max_pts - min_pts is
4234  * the duration of the stream without the last frame */
4235  duration += ist->max_pts - ist->min_pts;
4236  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4237  ifile->time_base);
4238  }
4239 
 /* Count down a finite -stream_loop; negative values loop forever. */
4240  if (ifile->loop > 0)
4241  ifile->loop--;
4242 
4243  return ret;
4244 }
4245 
4246 /*
4247  * Return
4248  * - 0 -- one packet was read and processed
4249  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4250  * this function should be called again
4251  * - AVERROR_EOF -- this function should not be called again
4252  */
4253 static int process_input(int file_index)
4254 {
4255  InputFile *ifile = input_files[file_index];
4256  AVFormatContext *is;
4257  InputStream *ist;
4258  AVPacket pkt;
4259  int ret, thread_ret, i, j;
4260  int64_t duration;
4261  int64_t pkt_dts;
4262 
4263  is = ifile->ctx;
4264  ret = get_input_packet(ifile, &pkt);
4265 
4266  if (ret == AVERROR(EAGAIN)) {
4267  ifile->eagain = 1;
4268  return ret;
4269  }
4270  if (ret < 0 && ifile->loop) {
4271  AVCodecContext *avctx;
4272  for (i = 0; i < ifile->nb_streams; i++) {
4273  ist = input_streams[ifile->ist_index + i];
4274  avctx = ist->dec_ctx;
4275  if (ist->decoding_needed) {
4276  ret = process_input_packet(ist, NULL, 1);
4277  if (ret>0)
4278  return 0;
4279  avcodec_flush_buffers(avctx);
4280  }
4281  }
4282 #if HAVE_THREADS
4283  free_input_thread(file_index);
4284 #endif
4285  ret = seek_to_start(ifile, is);
4286 #if HAVE_THREADS
4287  thread_ret = init_input_thread(file_index);
4288  if (thread_ret < 0)
4289  return thread_ret;
4290 #endif
4291  if (ret < 0)
4292  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4293  else
4294  ret = get_input_packet(ifile, &pkt);
4295  if (ret == AVERROR(EAGAIN)) {
4296  ifile->eagain = 1;
4297  return ret;
4298  }
4299  }
4300  if (ret < 0) {
4301  if (ret != AVERROR_EOF) {
4302  print_error(is->url, ret);
4303  if (exit_on_error)
4304  exit_program(1);
4305  }
4306 
4307  for (i = 0; i < ifile->nb_streams; i++) {
4308  ist = input_streams[ifile->ist_index + i];
4309  if (ist->decoding_needed) {
4310  ret = process_input_packet(ist, NULL, 0);
4311  if (ret>0)
4312  return 0;
4313  }
4314 
4315  /* mark all outputs that don't go through lavfi as finished */
4316  for (j = 0; j < nb_output_streams; j++) {
4317  OutputStream *ost = output_streams[j];
4318 
4319  if (ost->source_index == ifile->ist_index + i &&
4320  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4321  finish_output_stream(ost);
4322  }
4323  }
4324 
4325  ifile->eof_reached = 1;
4326  return AVERROR(EAGAIN);
4327  }
4328 
4329  reset_eagain();
4330 
4331  if (do_pkt_dump) {
4333  is->streams[pkt.stream_index]);
4334  }
4335  /* the following test is needed in case new streams appear
4336  dynamically in stream : we ignore them */
4337  if (pkt.stream_index >= ifile->nb_streams) {
4338  report_new_stream(file_index, &pkt);
4339  goto discard_packet;
4340  }
4341 
4342  ist = input_streams[ifile->ist_index + pkt.stream_index];
4343 
4344  ist->data_size += pkt.size;
4345  ist->nb_packets++;
4346 
4347  if (ist->discard)
4348  goto discard_packet;
4349 
4350  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4352  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4353  if (exit_on_error)
4354  exit_program(1);
4355  }
4356 
4357  if (debug_ts) {
4358  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4359  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4363  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4364  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4365  av_ts2str(input_files[ist->file_index]->ts_offset),
4366  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4367  }
4368 
4369  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4370  int64_t stime, stime2;
4371  // Correcting starttime based on the enabled streams
4372  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4373  // so we instead do it here as part of discontinuity handling
4374  if ( ist->next_dts == AV_NOPTS_VALUE
4375  && ifile->ts_offset == -is->start_time
4376  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4377  int64_t new_start_time = INT64_MAX;
4378  for (i=0; i<is->nb_streams; i++) {
4379  AVStream *st = is->streams[i];
4380  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4381  continue;
4382  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4383  }
4384  if (new_start_time > is->start_time) {
4385  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4386  ifile->ts_offset = -new_start_time;
4387  }
4388  }
4389 
4390  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4391  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4392  ist->wrap_correction_done = 1;
4393 
4394  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4395  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4396  ist->wrap_correction_done = 0;
4397  }
4398  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4399  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4400  ist->wrap_correction_done = 0;
4401  }
4402  }
4403 
4404  /* add the stream-global side data to the first packet */
4405  if (ist->nb_packets == 1) {
4406  for (i = 0; i < ist->st->nb_side_data; i++) {
4407  AVPacketSideData *src_sd = &ist->st->side_data[i];
4408  uint8_t *dst_data;
4409 
4410  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4411  continue;
4412 
4413  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4414  continue;
4415 
4416  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4417  if (!dst_data)
4418  exit_program(1);
4419 
4420  memcpy(dst_data, src_sd->data, src_sd->size);
4421  }
4422  }
4423 
4424  if (pkt.dts != AV_NOPTS_VALUE)
4425  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4426  if (pkt.pts != AV_NOPTS_VALUE)
4427  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4428 
4429  if (pkt.pts != AV_NOPTS_VALUE)
4430  pkt.pts *= ist->ts_scale;
4431  if (pkt.dts != AV_NOPTS_VALUE)
4432  pkt.dts *= ist->ts_scale;
4433 
4435  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4437  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4438  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4439  int64_t delta = pkt_dts - ifile->last_ts;
4440  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4441  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4442  ifile->ts_offset -= delta;
4444  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4445  delta, ifile->ts_offset);
4446  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4447  if (pkt.pts != AV_NOPTS_VALUE)
4448  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4449  }
4450  }
4451 
4452  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4453  if (pkt.pts != AV_NOPTS_VALUE) {
4454  pkt.pts += duration;
4455  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4456  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4457  }
4458 
4459  if (pkt.dts != AV_NOPTS_VALUE)
4460  pkt.dts += duration;
4461 
4463  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4465  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4466  !copy_ts) {
4467  int64_t delta = pkt_dts - ist->next_dts;
4468  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4469  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4470  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4471  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4472  ifile->ts_offset -= delta;
4474  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4475  delta, ifile->ts_offset);
4476  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4477  if (pkt.pts != AV_NOPTS_VALUE)
4478  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4479  }
4480  } else {
4481  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4482  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4483  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4484  pkt.dts = AV_NOPTS_VALUE;
4485  }
4486  if (pkt.pts != AV_NOPTS_VALUE){
4487  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4488  delta = pkt_pts - ist->next_dts;
4489  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4490  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4491  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4492  pkt.pts = AV_NOPTS_VALUE;
4493  }
4494  }
4495  }
4496  }
4497 
4498  if (pkt.dts != AV_NOPTS_VALUE)
4499  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4500 
4501  if (debug_ts) {
4502  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4504  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4505  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4506  av_ts2str(input_files[ist->file_index]->ts_offset),
4507  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4508  }
4509 
4510  sub2video_heartbeat(ist, pkt.pts);
4511 
4512  process_input_packet(ist, &pkt, 0);
4513 
4514 discard_packet:
4515  av_packet_unref(&pkt);
4516 
4517  return 0;
4518 }
4519 
4520 /**
4521  * Perform a step of transcoding for the specified filter graph.
4522  *
4523  * @param[in] graph filter graph to consider
4524  * @param[out] best_ist input stream where a frame would allow to continue
4525  * @return 0 for success, <0 for error
4526  */
4527 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4528 {
4529  int i, ret;
4530  int nb_requests, nb_requests_max = 0;
4531  InputFilter *ifilter;
4532  InputStream *ist;
4533 
4534  *best_ist = NULL;
4535  ret = avfilter_graph_request_oldest(graph->graph);
4536  if (ret >= 0)
4537  return reap_filters(0);
4538 
4539  if (ret == AVERROR_EOF) {
4540  ret = reap_filters(1);
4541  for (i = 0; i < graph->nb_outputs; i++)
4542  close_output_stream(graph->outputs[i]->ost);
4543  return ret;
4544  }
4545  if (ret != AVERROR(EAGAIN))
4546  return ret;
4547 
4548  for (i = 0; i < graph->nb_inputs; i++) {
4549  ifilter = graph->inputs[i];
4550  ist = ifilter->ist;
4551  if (input_files[ist->file_index]->eagain ||
4552  input_files[ist->file_index]->eof_reached)
4553  continue;
4554  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4555  if (nb_requests > nb_requests_max) {
4556  nb_requests_max = nb_requests;
4557  *best_ist = ist;
4558  }
4559  }
4560 
4561  if (!*best_ist)
4562  for (i = 0; i < graph->nb_outputs; i++)
4563  graph->outputs[i]->ost->unavailable = 1;
4564 
4565  return 0;
4566 }
4567 
4568 /**
4569  * Run a single step of transcoding.
4570  *
4571  * @return 0 for success, <0 for error
4572  */
4573 static int transcode_step(void)
4574 {
4575  OutputStream *ost;
4576  InputStream *ist = NULL;
4577  int ret;
4578 
4579  ost = choose_output();
4580  if (!ost) {
4581  if (got_eagain()) {
4582  reset_eagain();
4583  av_usleep(10000);
4584  return 0;
4585  }
4586  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4587  return AVERROR_EOF;
4588  }
4589 
4590  if (ost->filter && !ost->filter->graph->graph) {
4592  ret = configure_filtergraph(ost->filter->graph);
4593  if (ret < 0) {
4594  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4595  return ret;
4596  }
4597  }
4598  }
4599 
4600  if (ost->filter && ost->filter->graph->graph) {
4601  if (!ost->initialized) {
4602  char error[1024] = {0};
4603  ret = init_output_stream(ost, error, sizeof(error));
4604  if (ret < 0) {
4605  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4606  ost->file_index, ost->index, error);
4607  exit_program(1);
4608  }
4609  }
4610  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4611  return ret;
4612  if (!ist)
4613  return 0;
4614  } else if (ost->filter) {
4615  int i;
4616  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4617  InputFilter *ifilter = ost->filter->graph->inputs[i];
4618  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4619  ist = ifilter->ist;
4620  break;
4621  }