FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by cmdutils (banner/version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Output handle for per-frame video statistics (-vstats); NULL when disabled. */
112 static FILE *vstats_file;
113 
/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Non-zero when running detached (no controlling terminal to restore). */
134 static int run_as_daemon = 0;
/* Global duplicated/dropped frame counters used for the progress report. */
135 static int nb_frames_dup = 0;
/* Threshold for the "more than N frames duplicated" warning; grows x10 each time. */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
/* [0] = successful decodes, [1] = decode errors; used for the -xerror policy. */
138 static int64_t decode_error_stat[2];
139 
/* Cleared once an SDP has been printed so it is emitted at most once. */
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
/* Terminal attributes saved by term_init(); restored by term_exit_sigsafe(). */
163 static struct termios oldtty;
/* Set once oldtty holds valid saved state, i.e. restoration is required. */
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
/* Defined later in this file; joins/frees the per-input-file reader threads. */
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* Allocate (or re-fill) the sub2video canvas frame and zero it so it is fully
 * transparent. Returns 0 on success, a negative AVERROR on allocation failure.
 * NOTE(review): the function signature (original line 176,
 * `static int sub2video_get_blank_frame(InputStream *ist)`) and the line
 * setting frame->format (original line 184) were dropped by the extraction —
 * restore from upstream ffmpeg.c. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to the pre-configured sub2video size. */
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* Zero the first plane; all-zero RGB32 is fully transparent black. */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas into every filtergraph input fed by this
 * stream, stamped with the given pts, and remember that pts as last_pts.
 * NOTE(review): the flag arguments of av_buffersrc_add_frame_flags (original
 * lines 232-233, AV_BUFFERSRC_FLAG_KEEP_REF|AV_BUFFERSRC_FLAG_PUSH) were
 * dropped by the extraction — restore from upstream ffmpeg.c. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
/* The canvas must have been allocated by sub2video_get_blank_frame() first. */
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
/* EOF from a closed buffer source is expected during teardown; only warn on real errors. */
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Re-render the sub2video canvas: blank it, blit all rectangles of `sub`
 * (or none when sub is NULL, i.e. clear the canvas), then push it to the
 * filters with the subtitle's start pts, remembering its end pts.
 * NOTE(review): the signature (original line 240,
 * `static void sub2video_update(InputStream *ist, AVSubtitle *sub)`) and the
 * av_log() call head at original line 262 were dropped by the extraction —
 * restore from upstream ffmpeg.c. */
241 {
242  AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): declared int8_t here but sub2video_copy_rect() takes uint8_t*;
 * upstream uses uint8_t — likely extraction garble, verify against upstream. */
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
/* Display times are in ms relative to sub->pts (in AV_TIME_BASE units). */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
/* Clearing: re-send an empty canvas at the previous end time, forever valid. */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Restore the terminal attributes saved in term_init(). Kept minimal and
 * async-signal-safe so it can run from sigterm_handler(). */
320 static void term_exit_sigsafe(void)
321 {
322 #if HAVE_TERMIOS_H
323  if(restore_tty)
324  tcsetattr (0, TCSANOW, &oldtty);
325 #endif
326 }
327 
/* Public terminal teardown: silence logging, then restore the tty.
 * NOTE(review): the call to term_exit_sigsafe() (original line 331) was
 * dropped by the extraction — restore from upstream ffmpeg.c. */
328 void term_exit(void)
329 {
/* Flush/quiet the logger before touching the terminal. */
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
/* Last termination signal received (0 = none); written from the signal handler. */
334 static volatile int received_sigterm = 0;
/* Count of termination signals received; >3 forces a hard exit. */
335 static volatile int received_nb_signals = 0;
/* Set once cleanup has finished; the Windows ctrl handler spins on this. */
337 static volatile int ffmpeg_exited = 0;
/* Process exit code accumulated by non-fatal errors (e.g. muxer write failure). */
338 static int main_return_code = 0;
339 
/* Signal handler for SIGINT/SIGTERM/SIGXCPU: records the signal and, after
 * more than 3 signals, hard-exits with code 123. Uses only async-signal-safe
 * calls (write/strlen/exit).
 * NOTE(review): the parameter list (original line 341, `sigterm_handler(int sig)`)
 * and original lines 345-346 (received_nb_signals++; term_exit_sigsafe();)
 * were dropped by the extraction — restore from upstream ffmpeg.c. */
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, unlike fprintf. */
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Put the controlling terminal into raw-ish mode (so single keypresses can be
 * read by read_key()) and install signal/console handlers.
 * NOTE(review): original line 390 (`if (!run_as_daemon && stdin_interaction) {`
 * or similar guard opening the brace closed at original line 408) was dropped
 * by the extraction — the braces below do not balance without it; restore from
 * upstream ffmpeg.c. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
/* Save current state so term_exit_sigsafe() can restore it. */
393  oldtty = tty;
394  restore_tty = 1;
395 
/* Disable input translation/flow control, echo and canonical (line) mode;
 * keep output post-processing; 8-bit chars, no parity; read returns after
 * 1 byte with no timeout. */
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
425 static int read_key(void)
426 {
427  unsigned char ch;
428 #if HAVE_TERMIOS_H
429  int n = 1;
430  struct timeval tv;
431  fd_set rfds;
432 
433  FD_ZERO(&rfds);
434  FD_SET(0, &rfds);
435  tv.tv_sec = 0;
436  tv.tv_usec = 0;
437  n = select(1, &rfds, NULL, NULL, &tv);
438  if (n > 0) {
439  n = read(0, &ch, 1);
440  if (n == 1)
441  return ch;
442 
443  return n;
444  }
445 #elif HAVE_KBHIT
446 # if HAVE_PEEKNAMEDPIPE
447  static int is_pipe;
448  static HANDLE input_handle;
449  DWORD dw, nchars;
450  if(!input_handle){
451  input_handle = GetStdHandle(STD_INPUT_HANDLE);
452  is_pipe = !GetConsoleMode(input_handle, &dw);
453  }
454 
455  if (is_pipe) {
456  /* When running under a GUI, you will end here. */
457  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
458  // input pipe may have been closed by the program that ran ffmpeg
459  return -1;
460  }
461  //Read it
462  if(nchars != 0) {
463  read(0, &ch, 1);
464  return ch;
465  }else{
466  return -1;
467  }
468  }
469 # endif
470  if(kbhit())
471  return(getch());
472 #endif
473  return -1;
474 }
475 
/* AVIOInterruptCallback: non-zero return makes libavformat abort blocking I/O.
 * NOTE(review): the return statement (original line 478, returning whether a
 * termination signal was received) was dropped by the extraction — restore
 * from upstream ffmpeg.c. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Registered exit handler: free every filtergraph, output file/stream and
 * input file/stream, close open muxer I/O, drain queued frames/subtitles/
 * packets, and report the final status. `ret` is the pending exit code.
 * NOTE(review): numerous lines were dropped by the extraction (e.g. original
 * lines 506, 531, 536, 557, 562, 566, 572, 595-599, 604, 611, 624 — fifo
 * frees, hwaccel teardown, AVFormatContext decl, etc.) — restore from
 * upstream ffmpeg.c before relying on this body. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
/* Free filtergraphs first: their inputs hold refs to frames and subtitles. */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
/* Drain any frames still queued for this filtergraph input. */
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
498  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499  sizeof(frame), NULL);
500  av_frame_free(&frame);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
527  av_freep(&filtergraphs[i]);
528  }
529  av_freep(&filtergraphs);
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
/* Only close the AVIO handle we opened ourselves (not AVFMT_NOFILE muxers). */
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
545  av_freep(&output_files[i]);
546  }
547  for (i = 0; i < nb_output_streams; i++) {
548  OutputStream *ost = output_streams[i];
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570 
573 
/* Unref packets that never reached the muxer (header was never written). */
574  if (ost->muxing_queue) {
575  while (av_fifo_size(ost->muxing_queue)) {
576  AVPacket pkt;
577  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578  av_packet_unref(&pkt);
579  }
581  }
582 
583  av_freep(&output_streams[i]);
584  }
585 #if HAVE_THREADS
586  free_input_threads();
587 #endif
588  for (i = 0; i < nb_input_files; i++) {
589  avformat_close_input(&input_files[i]->ctx);
590  av_freep(&input_files[i]);
591  }
592  for (i = 0; i < nb_input_streams; i++) {
593  InputStream *ist = input_streams[i];
594 
597  av_dict_free(&ist->decoder_opts);
600  av_freep(&ist->filters);
601  av_freep(&ist->hwaccel_device);
602  av_freep(&ist->dts_buffer);
603 
605 
606  av_freep(&input_streams[i]);
607  }
608 
609  if (vstats_file) {
610  if (fclose(vstats_file))
612  "Error closing vstats file, loss of information possible: %s\n",
613  av_err2str(AVERROR(errno)));
614  }
616 
617  av_freep(&input_streams);
618  av_freep(&input_files);
619  av_freep(&output_streams);
620  av_freep(&output_files);
621 
622  uninit_opts();
623 
625 
626  if (received_sigterm) {
627  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628  (int) received_sigterm);
629  } else if (ret && atomic_load(&transcode_init_done)) {
630  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
631  }
632  term_exit();
/* Let the Windows ctrl handler (if spinning) know cleanup has finished. */
633  ffmpeg_exited = 1;
634 }
635 
/* Remove from dictionary `a` every key that also appears in `b` (used to
 * strip already-consumed options before the unused-option check).
 * NOTE(review): the signature (original line 636,
 * `static void remove_avoptions(AVDictionary **a, AVDictionary *b)`) and the
 * loop body (original line 641, an av_dict_set(a, t->key, NULL, ...) call)
 * were dropped by the extraction — restore from upstream ffmpeg.c. */
637 {
638  AVDictionaryEntry *t = NULL;
639 
/* Empty key + IGNORE_SUFFIX iterates every entry of b. */
640  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642  }
643 }
644 
/* Abort with a fatal error if any option in `m` was not consumed.
 * NOTE(review): the signature (original line 645,
 * `static void assert_avoptions(AVDictionary *m)`) and the declaration of
 * `t` (original line 647, `AVDictionaryEntry *t;`) were dropped by the
 * extraction — restore from upstream ffmpeg.c. */
646 {
/* Any remaining entry means the user passed an option nothing recognized. */
648  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
650  exit_program(1);
651  }
652 }
653 
/* Terminate after an experimental codec was requested without
 * -strict experimental; the diagnostic is printed by the caller. */
654 static void abort_codec_experimental(AVCodec *c, int encoder)
655 {
656  exit_program(1);
657 }
658 
/* With -benchmark_all: log user/sys/real time deltas since the previous call,
 * labelled by the printf-style `fmt`; always refreshes `current_time`.
 * NOTE(review): original line 662 (the declaration/initialization of `t`,
 * presumably via get_benchmark_time_stamps()) and line 670 (the av_log call
 * head) were dropped by the extraction — restore from upstream ffmpeg.c. */
659 static void update_benchmark(const char *fmt, ...)
660 {
661  if (do_benchmark_all) {
663  va_list va;
664  char buf[1024];
665 
/* A NULL fmt just resets the reference timestamps without logging. */
666  if (fmt) {
667  va_start(va, fmt);
668  vsnprintf(buf, sizeof(buf), fmt, va);
669  va_end(va);
671  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672  t.user_usec - current_time.user_usec,
673  t.sys_usec - current_time.sys_usec,
674  t.real_usec - current_time.real_usec, buf);
675  }
676  current_time = t;
677  }
678 }
679 
/* Mark every output stream finished: `ost` gets `this_stream`, all others get
 * `others` (OR-ed into their finished flags).
 * NOTE(review): the signature (original line 680, taking OutputStream *ost and
 * two OSTFinished flag values) was dropped by the extraction — restore from
 * upstream ffmpeg.c. */
681 {
682  int i;
683  for (i = 0; i < nb_output_streams; i++) {
684  OutputStream *ost2 = output_streams[i];
685  ost2->finished |= ost == ost2 ? this_stream : others;
686  }
687 }
688 
/* Hand one packet to the muxer for `ost`: enforce -frames limits, buffer the
 * packet while the muxer header is not yet written, extract encoder quality/
 * error side data for video, rescale timestamps to the stream time base,
 * sanitize DTS/PTS, then interleave-write. Consumes `pkt` in all paths.
 * `unqueue` is non-zero when the packet is being replayed from the muxing
 * queue (so it must not be counted against -frames again).
 * NOTE(review): several lines were dropped by the extraction (original lines
 * 735-736: the bitexact/DTS-strip guard; 741: the av_packet_get_side_data
 * call head; 775: the `if (` opening the monotonicity check; 808: the media
 * type string argument; 819: the error-policy line after the write failure)
 * — restore from upstream ffmpeg.c. */
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
690 {
691  AVFormatContext *s = of->ctx;
692  AVStream *st = ost->st;
693  int ret;
694 
695  /*
696  * Audio encoders may split the packets -- #frames in != #packets out.
697  * But there is no reordering, so we can limit the number of output packets
698  * by simply dropping them here.
699  * Counting encoded video frames needs to be done separately because of
700  * reordering, see do_video_out().
701  * Do not count the packet when unqueued because it has been counted when queued.
702  */
703  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704  if (ost->frame_number >= ost->max_frames) {
705  av_packet_unref(pkt);
706  return;
707  }
708  ost->frame_number++;
709  }
710 
711  if (!of->header_written) {
712  AVPacket tmp_pkt = {0};
713  /* the muxer is not initialized yet, buffer the packet */
714  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped at -max_muxing_queue_size. */
715  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716  ost->max_muxing_queue_size);
717  if (new_size <= av_fifo_size(ost->muxing_queue)) {
719  "Too many packets buffered for output stream %d:%d.\n",
720  ost->file_index, ost->st->index);
721  exit_program(1);
722  }
723  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
724  if (ret < 0)
725  exit_program(1);
726  }
/* The fifo stores the AVPacket struct by value, so the data must be refcounted. */
727  ret = av_packet_make_refcounted(pkt);
728  if (ret < 0)
729  exit_program(1);
730  av_packet_move_ref(&tmp_pkt, pkt);
731  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
732  return;
733  }
734 
737  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
738 
739  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
740  int i;
742  NULL);
/* Quality/picture-type/error stats come from AV_PKT_DATA_QUALITY_STATS side data. */
743  ost->quality = sd ? AV_RL32(sd) : -1;
744  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
745 
746  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
747  if (sd && i < sd[5])
748  ost->error[i] = AV_RL64(sd + 8 + 8*i);
749  else
750  ost->error[i] = -1;
751  }
752 
/* CFR output: force exactly one frame duration per packet. */
753  if (ost->frame_rate.num && ost->is_cfr) {
754  if (pkt->duration > 0)
755  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
757  ost->mux_timebase);
758  }
759  }
760 
761  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
762 
763  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764  if (pkt->dts != AV_NOPTS_VALUE &&
765  pkt->pts != AV_NOPTS_VALUE &&
766  pkt->dts > pkt->pts) {
767  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
768  pkt->dts, pkt->pts,
769  ost->file_index, ost->st->index);
/* Replace both with the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
770  pkt->pts =
771  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
774  }
776  pkt->dts != AV_NOPTS_VALUE &&
777  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778  ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict muxers require strictly increasing DTS; others allow equality. */
779  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780  if (pkt->dts < max) {
781  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782  av_log(s, loglevel, "Non-monotonous DTS in output stream "
783  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
785  if (exit_on_error) {
786  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
787  exit_program(1);
788  }
789  av_log(s, loglevel, "changing to %"PRId64". This may result "
790  "in incorrect timestamps in the output file.\n",
791  max);
792  if (pkt->pts >= pkt->dts)
793  pkt->pts = FFMAX(pkt->pts, max);
794  pkt->dts = max;
795  }
796  }
797  }
798  ost->last_mux_dts = pkt->dts;
799 
800  ost->data_size += pkt->size;
801  ost->packets_written++;
802 
803  pkt->stream_index = ost->index;
804 
805  if (debug_ts) {
806  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
811  pkt->size
812  );
813  }
814 
815  ret = av_interleaved_write_frame(s, pkt);
816  if (ret < 0) {
817  print_error("av_interleaved_write_frame()", ret);
818  main_return_code = 1;
820  }
821  av_packet_unref(pkt);
822 }
823 
/* Mark this output stream's encoder finished; with -shortest, clamp the
 * output file's recording time to this stream's current end time so the
 * other streams stop there too.
 * NOTE(review): the signature (original line 824,
 * `static void close_output_stream(OutputStream *ost)`) was dropped by the
 * extraction — restore from upstream ffmpeg.c. */
825 {
826  OutputFile *of = output_files[ost->file_index];
827 
828  ost->finished |= ENCODER_FINISHED;
829  if (of->shortest) {
/* Current position of this stream, converted to AV_TIME_BASE units. */
830  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831  of->recording_time = FFMIN(of->recording_time, end);
832  }
833 }
834 
835 /*
836  * Send a single packet to the output, applying any bitstream filters
837  * associated with the output stream. This may result in any number
838  * of packets actually being written, depending on what bitstream
839  * filters are applied. The supplied packet is consumed and will be
840  * blank (as if newly-allocated) when this function returns.
841  *
842  * If eof is set, instead indicate EOF to all bitstream filters and
843  * therefore flush any delayed packets to the output. A blank packet
844  * must be supplied in this case.
845  */
/* Apply the stream's bitstream-filter chain to `pkt` and write the resulting
 * packet(s); with eof set, flush the chain instead (see the comment above).
 * NOTE(review): the first line of the signature (original line 846,
 * `static void output_packet(OutputFile *of, AVPacket *pkt,`) was dropped by
 * the extraction — restore from upstream ffmpeg.c. */
847  OutputStream *ost, int eof)
848 {
849  int ret = 0;
850 
851  /* apply the output bitstream filters, if any */
852  if (ost->nb_bitstream_filters) {
853  int idx;
854 
/* NULL packet signals EOF/flush to the first filter. */
855  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
856  if (ret < 0)
857  goto finish;
858 
859  eof = 0;
/* idx is the index of the next filter to feed; 0 means the chain is drained. */
860  idx = 1;
861  while (idx) {
862  /* get a packet from the previous filter up the chain */
863  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864  if (ret == AVERROR(EAGAIN)) {
/* This filter needs more input: step back up the chain. */
865  ret = 0;
866  idx--;
867  continue;
868  } else if (ret == AVERROR_EOF) {
869  eof = 1;
870  } else if (ret < 0)
871  goto finish;
872 
873  /* send it to the next filter down the chain or to the muxer */
874  if (idx < ost->nb_bitstream_filters) {
875  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
878  idx++;
879  eof = 0;
880  } else if (eof)
881  goto finish;
882  else
883  write_packet(of, pkt, ost, 0);
884  }
885  } else if (!eof)
886  write_packet(of, pkt, ost, 0);
887 
888 finish:
889  if (ret < 0 && ret != AVERROR_EOF) {
890  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
892  if(exit_on_error)
893  exit_program(1);
894  }
895 }
896 
/* Return 1 while the stream is still within the output file's -t recording
 * window, otherwise close the stream and return 0.
 * NOTE(review): the signature (original line 897,
 * `static int check_recording_time(OutputStream *ost)`) and the first half of
 * the comparison (original line 902, an av_compare_ts of ost->sync_opts/
 * enc_ctx->time_base against of->recording_time) were dropped by the
 * extraction — restore from upstream ffmpeg.c. */
898 {
899  OutputFile *of = output_files[ost->file_index];
900 
901  if (of->recording_time != INT64_MAX &&
903  AV_TIME_BASE_Q) >= 0) {
904  close_output_stream(ost);
905  return 0;
906  }
907  return 1;
908 }
909 
/* Encode one audio frame and mux every packet the encoder produces.
 * Fills in missing frame pts from the running sync_opts counter, keeps the
 * encoded-samples/frames statistics, and exits the program on encode errors.
 * NOTE(review): the first signature line (original line 910,
 * `static void do_audio_out(OutputFile *of, OutputStream *ost,`) and original
 * line 931 were dropped by the extraction — restore from upstream ffmpeg.c. */
911  AVFrame *frame)
912 {
913  AVCodecContext *enc = ost->enc_ctx;
914  AVPacket pkt;
915  int ret;
916 
917  av_init_packet(&pkt);
918  pkt.data = NULL;
919  pkt.size = 0;
920 
921  if (!check_recording_time(ost))
922  return;
923 
/* No pts from upstream (or async disabled): continue the running counter. */
924  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925  frame->pts = ost->sync_opts;
926  ost->sync_opts = frame->pts + frame->nb_samples;
927  ost->samples_encoded += frame->nb_samples;
928  ost->frames_encoded++;
929 
930  av_assert0(pkt.size || !pkt.data);
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936  enc->time_base.num, enc->time_base.den);
937  }
938 
939  ret = avcodec_send_frame(enc, frame);
940  if (ret < 0)
941  goto error;
942 
/* Drain all packets the encoder has ready; EAGAIN means it wants more input. */
943  while (1) {
944  ret = avcodec_receive_packet(enc, &pkt);
945  if (ret == AVERROR(EAGAIN))
946  break;
947  if (ret < 0)
948  goto error;
949 
950  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
951 
952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
953 
954  if (debug_ts) {
955  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
959  }
960 
961  output_packet(of, &pkt, ost, 0);
962  }
963 
964  return;
965 error:
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968 }
969 
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (one packet to draw, one to clear), which is why the loop below runs
 * twice for that codec, temporarily zeroing num_rects on the second pass.
 * `sub` is mutated in place (pts/display times) with the rect count
 * saved and restored around each encode. Left code-identical: the exact
 * mutate/encode/restore ordering is load-bearing. */
970 static void do_subtitle_out(OutputFile *of,
971  OutputStream *ost,
972  AVSubtitle *sub)
973 {
974  int subtitle_out_max_size = 1024 * 1024;
975  int subtitle_out_size, nb, i;
976  AVCodecContext *enc;
977  AVPacket pkt;
978  int64_t pts;
979 
980  if (sub->pts == AV_NOPTS_VALUE) {
981  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
982  if (exit_on_error)
983  exit_program(1);
984  return;
985  }
986 
987  enc = ost->enc_ctx;
988 
/* Lazily allocate the shared 1 MiB encode buffer (freed at program exit). */
989  if (!subtitle_out) {
990  subtitle_out = av_malloc(subtitle_out_max_size);
991  if (!subtitle_out) {
992  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
993  exit_program(1);
994  }
995  }
996 
997  /* Note: DVB subtitle need one packet to draw them and one other
998  packet to clear them */
999  /* XXX: signal it in the codec context ? */
1000  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1001  nb = 2;
1002  else
1003  nb = 1;
1004 
1005  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1006  pts = sub->pts;
1007  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008  pts -= output_files[ost->file_index]->start_time;
1009  for (i = 0; i < nb; i++) {
1010  unsigned save_num_rects = sub->num_rects;
1011 
1012  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013  if (!check_recording_time(ost))
1014  return;
1015 
1016  sub->pts = pts;
1017  // start_display_time is required to be 0
1018  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019  sub->end_display_time -= sub->start_display_time;
1020  sub->start_display_time = 0;
/* Second DVB pass encodes zero rects = the "clear" packet. */
1021  if (i == 1)
1022  sub->num_rects = 0;
1023 
1024  ost->frames_encoded++;
1025 
1026  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027  subtitle_out_max_size, sub);
1028  if (i == 1)
1029  sub->num_rects = save_num_rects;
1030  if (subtitle_out_size < 0) {
1031  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1032  exit_program(1);
1033  }
1034 
1035  av_init_packet(&pkt);
1036  pkt.data = subtitle_out;
1037  pkt.size = subtitle_out_size;
1038  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041  /* XXX: the pts correction is handled here. Maybe handling
1042  it in the codec would be better */
/* Draw packet at start_display_time, clear packet at end_display_time. */
1043  if (i == 0)
1044  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1045  else
1046  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1047  }
1048  pkt.dts = pkt.pts;
1049  output_packet(of, &pkt, ost, 0);
1050  }
1051 }
1052 
1053 static void do_video_out(OutputFile *of,
1054  OutputStream *ost,
1055  AVFrame *next_picture,
1056  double sync_ipts)
1057 {
1058  int ret, format_video_sync;
1059  AVPacket pkt;
1060  AVCodecContext *enc = ost->enc_ctx;
1061  AVCodecParameters *mux_par = ost->st->codecpar;
1062  AVRational frame_rate;
1063  int nb_frames, nb0_frames, i;
1064  double delta, delta0;
1065  double duration = 0;
1066  int frame_size = 0;
1067  InputStream *ist = NULL;
1069 
1070  if (ost->source_index >= 0)
1071  ist = input_streams[ost->source_index];
1072 
1073  frame_rate = av_buffersink_get_frame_rate(filter);
1074  if (frame_rate.num > 0 && frame_rate.den > 0)
1075  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1076 
1077  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1079 
1080  if (!ost->filters_script &&
1081  !ost->filters &&
1082  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1083  next_picture &&
1084  ist &&
1085  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1086  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1087  }
1088 
1089  if (!next_picture) {
1090  //end, flushing
1091  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1092  ost->last_nb0_frames[1],
1093  ost->last_nb0_frames[2]);
1094  } else {
1095  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1096  delta = delta0 + duration;
1097 
1098  /* by default, we output a single frame */
1099  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1100  nb_frames = 1;
1101 
1102  format_video_sync = video_sync_method;
1103  if (format_video_sync == VSYNC_AUTO) {
1104  if(!strcmp(of->ctx->oformat->name, "avi")) {
1105  format_video_sync = VSYNC_VFR;
1106  } else
1107  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108  if ( ist
1109  && format_video_sync == VSYNC_CFR
1110  && input_files[ist->file_index]->ctx->nb_streams == 1
1111  && input_files[ist->file_index]->input_ts_offset == 0) {
1112  format_video_sync = VSYNC_VSCFR;
1113  }
1114  if (format_video_sync == VSYNC_CFR && copy_ts) {
1115  format_video_sync = VSYNC_VSCFR;
1116  }
1117  }
1118  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1119 
1120  if (delta0 < 0 &&
1121  delta > 0 &&
1122  format_video_sync != VSYNC_PASSTHROUGH &&
1123  format_video_sync != VSYNC_DROP) {
1124  if (delta0 < -0.6) {
1125  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126  } else
1127  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1128  sync_ipts = ost->sync_opts;
1129  duration += delta0;
1130  delta0 = 0;
1131  }
1132 
1133  switch (format_video_sync) {
1134  case VSYNC_VSCFR:
1135  if (ost->frame_number == 0 && delta0 >= 0.5) {
1136  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1137  delta = duration;
1138  delta0 = 0;
1139  ost->sync_opts = lrint(sync_ipts);
1140  }
1141  case VSYNC_CFR:
1142  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1143  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144  nb_frames = 0;
1145  } else if (delta < -1.1)
1146  nb_frames = 0;
1147  else if (delta > 1.1) {
1148  nb_frames = lrintf(delta);
1149  if (delta0 > 1.1)
1150  nb0_frames = lrintf(delta0 - 0.6);
1151  }
1152  break;
1153  case VSYNC_VFR:
1154  if (delta <= -0.6)
1155  nb_frames = 0;
1156  else if (delta > 0.6)
1157  ost->sync_opts = lrint(sync_ipts);
1158  break;
1159  case VSYNC_DROP:
1160  case VSYNC_PASSTHROUGH:
1161  ost->sync_opts = lrint(sync_ipts);
1162  break;
1163  default:
1164  av_assert0(0);
1165  }
1166  }
1167 
1168  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1169  nb0_frames = FFMIN(nb0_frames, nb_frames);
1170 
1171  memmove(ost->last_nb0_frames + 1,
1172  ost->last_nb0_frames,
1173  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1174  ost->last_nb0_frames[0] = nb0_frames;
1175 
1176  if (nb0_frames == 0 && ost->last_dropped) {
1177  nb_frames_drop++;
1179  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1180  ost->frame_number, ost->st->index, ost->last_frame->pts);
1181  }
1182  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1183  if (nb_frames > dts_error_threshold * 30) {
1184  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1185  nb_frames_drop++;
1186  return;
1187  }
1188  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1189  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1190  if (nb_frames_dup > dup_warning) {
1191  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1192  dup_warning *= 10;
1193  }
1194  }
1195  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 
1197  /* duplicates frame if needed */
1198  for (i = 0; i < nb_frames; i++) {
1199  AVFrame *in_picture;
1200  int forced_keyframe = 0;
1201  double pts_time;
1202  av_init_packet(&pkt);
1203  pkt.data = NULL;
1204  pkt.size = 0;
1205 
1206  if (i < nb0_frames && ost->last_frame) {
1207  in_picture = ost->last_frame;
1208  } else
1209  in_picture = next_picture;
1210 
1211  if (!in_picture)
1212  return;
1213 
1214  in_picture->pts = ost->sync_opts;
1215 
1216  if (!check_recording_time(ost))
1217  return;
1218 
1220  ost->top_field_first >= 0)
1221  in_picture->top_field_first = !!ost->top_field_first;
1222 
1223  if (in_picture->interlaced_frame) {
1224  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1225  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1226  else
1227  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1228  } else
1229  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1230 
1231  in_picture->quality = enc->global_quality;
1232  in_picture->pict_type = 0;
1233 
1234  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1235  in_picture->pts != AV_NOPTS_VALUE)
1236  ost->forced_kf_ref_pts = in_picture->pts;
1237 
1238  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1240  if (ost->forced_kf_index < ost->forced_kf_count &&
1241  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242  ost->forced_kf_index++;
1243  forced_keyframe = 1;
1244  } else if (ost->forced_keyframes_pexpr) {
1245  double res;
1246  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1249  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1255  res);
1256  if (res) {
1257  forced_keyframe = 1;
1263  }
1264 
1266  } else if ( ost->forced_keyframes
1267  && !strncmp(ost->forced_keyframes, "source", 6)
1268  && in_picture->key_frame==1) {
1269  forced_keyframe = 1;
1270  }
1271 
1272  if (forced_keyframe) {
1273  in_picture->pict_type = AV_PICTURE_TYPE_I;
1274  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1275  }
1276 
1278  if (debug_ts) {
1279  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282  enc->time_base.num, enc->time_base.den);
1283  }
1284 
1285  ost->frames_encoded++;
1286 
1287  ret = avcodec_send_frame(enc, in_picture);
1288  if (ret < 0)
1289  goto error;
1290  // Make sure Closed Captions will not be duplicated
1292 
1293  while (1) {
1294  ret = avcodec_receive_packet(enc, &pkt);
1295  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296  if (ret == AVERROR(EAGAIN))
1297  break;
1298  if (ret < 0)
1299  goto error;
1300 
1301  if (debug_ts) {
1302  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1306  }
1307 
1308  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309  pkt.pts = ost->sync_opts;
1310 
1311  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1312 
1313  if (debug_ts) {
1314  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1318  }
1319 
1320  frame_size = pkt.size;
1321  output_packet(of, &pkt, ost, 0);
1322 
1323  /* if two pass, output log */
1324  if (ost->logfile && enc->stats_out) {
1325  fprintf(ost->logfile, "%s", enc->stats_out);
1326  }
1327  }
1328  ost->sync_opts++;
1329  /*
1330  * For video, number of frames in == number of packets out.
1331  * But there may be reordering, so we can't throw away frames on encoder
1332  * flush, we need to limit them here, before they go into encoder.
1333  */
1334  ost->frame_number++;
1335 
1336  if (vstats_filename && frame_size)
1337  do_video_stats(ost, frame_size);
1338  }
1339 
1340  if (!ost->last_frame)
1341  ost->last_frame = av_frame_alloc();
1342  av_frame_unref(ost->last_frame);
1343  if (next_picture && ost->last_frame)
1344  av_frame_ref(ost->last_frame, next_picture);
1345  else
1346  av_frame_free(&ost->last_frame);
1347 
1348  return;
1349 error:
1350  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1351  exit_program(1);
1352 }
1353 
1354 static double psnr(double d)
1355 {
1356  return -10.0 * log10(d);
1357 }
1358 
/* NOTE(review): the signature line (original line 1359) was lost in this
 * extraction. From the call sites do_video_stats(ost, frame_size) this is
 * the body of static void do_video_stats(OutputStream *ost, int frame_size)
 * -- confirm against upstream ffmpeg.c.
 * Appends one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, packet size, bitrate) to the file named by vstats_filename. */
1360 {
1361  AVCodecContext *enc;
1362  int frame_number;
1363  double ti1, bitrate, avg_bitrate;
1364 
1365  /* this is executed just the first time do_video_stats is called */
1366  if (!vstats_file) {
1367  vstats_file = fopen(vstats_filename, "w");
1368  if (!vstats_file) {
1369  perror("fopen");
1370  exit_program(1);
1371  }
1372  }
1373 
1374  enc = ost->enc_ctx;
1375  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376  frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy one-file-per-run format (v1)
 * and the newer format that also prints the output file/stream indices. */
1377  if (vstats_version <= 1) {
1378  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379  ost->quality / (float)FF_QP2LAMBDA);
1380  } else {
1381  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382  ost->quality / (float)FF_QP2LAMBDA);
1383  }
1384 
1385  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387 
1388  fprintf(vstats_file,"f_size= %6d ", frame_size);
1389  /* compute pts value */
1390  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp to avoid division by ~0 in the average-bitrate computation below */
1391  if (ti1 < 0.01)
1392  ti1 = 0.01;
1393 
1394  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1399  }
1400 }
1401 
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1403 
/* NOTE(review): the signature line (original 1404) was lost in this
 * extraction; presumably static void finish_output_stream(OutputStream *ost)
 * -- confirm against upstream. Original line 1409 (which presumably marks
 * ost itself as finished) is also missing here.
 * Marks the stream -- and, with -shortest, every sibling stream of the same
 * output file -- as finished for both encoder and muxer. */
1405 {
1406  OutputFile *of = output_files[ost->file_index];
1407  int i;
1408 
1410 
1411  if (of->shortest) {
/* -shortest: ending one stream ends all streams of this output file */
1412  for (i = 0; i < of->ctx->nb_streams; i++)
1413  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414  }
1415 }
1416 
1417 /**
1418  * Get and encode new output from any of the filtergraphs, without causing
1419  * activity.
1420  *
1421  * @return 0 for success, <0 for severe errors
1422  */
/* Drain every configured buffersink and feed the resulting frames to the
 * audio/video encoders. Does not push new data into the filtergraphs, so it
 * causes no demux/decode activity. With flush != 0, EOF from a video sink
 * triggers a final do_video_out(NULL) to flush duplicated frames.
 * NOTE(review): several original lines are missing from this extraction
 * (1432: the AVFilterContext *filter declaration; 1458: the flags argument
 * of av_buffersink_get_frame_flags; 1461/1464/1509: av_log call heads). */
1423 static int reap_filters(int flush)
1424 {
1425  AVFrame *filtered_frame = NULL;
1426  int i;
1427 
1428  /* Reap all buffers present in the buffer sinks */
1429  for (i = 0; i < nb_output_streams; i++) {
1430  OutputStream *ost = output_streams[i];
1431  OutputFile *of = output_files[ost->file_index];
1433  AVCodecContext *enc = ost->enc_ctx;
1434  int ret = 0;
1435 
1436  if (!ost->filter || !ost->filter->graph->graph)
1437  continue;
1438  filter = ost->filter->filter;
1439 
/* lazily initialize the output stream the first time its filter output
 * becomes available; failure here is fatal */
1440  if (!ost->initialized) {
1441  char error[1024] = "";
1442  ret = init_output_stream(ost, error, sizeof(error));
1443  if (ret < 0) {
1444  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445  ost->file_index, ost->index, error);
1446  exit_program(1);
1447  }
1448  }
1449 
1450  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451  return AVERROR(ENOMEM);
1452  }
1453  filtered_frame = ost->filtered_frame;
1454 
1455  while (1) {
1456  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1459  if (ret < 0) {
1460  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463  } else if (flush && ret == AVERROR_EOF) {
1465  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1466  }
1467  break;
1468  }
1469  if (ost->finished) {
1470  av_frame_unref(filtered_frame);
1471  continue;
1472  }
/* rescale the frame pts from the filter time base to the encoder time
 * base, keeping a higher-precision double copy (float_pts) for the
 * fps/vsync code in do_video_out */
1473  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475  AVRational filter_tb = av_buffersink_get_time_base(filter);
1476  AVRational tb = enc->time_base;
1477  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478 
1479  tb.den <<= extra_bits;
1480  float_pts =
1481  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483  float_pts /= 1 << extra_bits;
1484  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486 
1487  filtered_frame->pts =
1488  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1490  }
1491 
1492  switch (av_buffersink_get_type(filter)) {
1493  case AVMEDIA_TYPE_VIDEO:
1494  if (!ost->frame_aspect_ratio.num)
1495  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1496 
1497  if (debug_ts) {
1498  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1499  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1500  float_pts,
1501  enc->time_base.num, enc->time_base.den);
1502  }
1503 
1504  do_video_out(of, ost, filtered_frame, float_pts);
1505  break;
1506  case AVMEDIA_TYPE_AUDIO:
1507  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1508  enc->channels != filtered_frame->channels) {
1510  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1511  break;
1512  }
1513  do_audio_out(of, ost, filtered_frame);
1514  break;
1515  default:
1516  // TODO support subtitle filters
1517  av_assert0(0);
1518  }
1519 
1520  av_frame_unref(filtered_frame);
1521  }
1522  }
1523 
1524  return 0;
1525 }
1526 
/* Print the end-of-run summary: per-type output byte totals with muxing
 * overhead at INFO level, then per-input/per-output stream packet and byte
 * counters at VERBOSE level, and a warning if nothing was encoded at all.
 * NOTE(review): original lines 1546-1547 are missing from this extraction;
 * they presumably contain the condition guarding "pass1_used = 0" (a check
 * for two-pass encoding flags) -- confirm against upstream ffmpeg.c. */
1527 static void print_final_stats(int64_t total_size)
1528 {
1529  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1530  uint64_t subtitle_size = 0;
1531  uint64_t data_size = 0;
1532  float percent = -1.0;
1533  int i, j;
1534  int pass1_used = 1;
1535 
/* accumulate written bytes per media type across all output streams */
1536  for (i = 0; i < nb_output_streams; i++) {
1537  OutputStream *ost = output_streams[i];
1538  switch (ost->enc_ctx->codec_type) {
1539  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1540  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1541  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1542  default: other_size += ost->data_size; break;
1543  }
1544  extra_size += ost->enc_ctx->extradata_size;
1545  data_size += ost->data_size;
1548  pass1_used = 0;
1549  }
1550 
/* muxing overhead = container bytes beyond the raw stream payloads */
1551  if (data_size && total_size>0 && total_size >= data_size)
1552  percent = 100.0 * (total_size - data_size) / data_size;
1553 
1554  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1555  video_size / 1024.0,
1556  audio_size / 1024.0,
1557  subtitle_size / 1024.0,
1558  other_size / 1024.0,
1559  extra_size / 1024.0);
1560  if (percent >= 0.0)
1561  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1562  else
1563  av_log(NULL, AV_LOG_INFO, "unknown");
1564  av_log(NULL, AV_LOG_INFO, "\n");
1565 
1566  /* print verbose per-stream stats */
1567  for (i = 0; i < nb_input_files; i++) {
1568  InputFile *f = input_files[i];
1569  uint64_t total_packets = 0, total_size = 0;
1570 
1571  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1572  i, f->ctx->url);
1573 
1574  for (j = 0; j < f->nb_streams; j++) {
1575  InputStream *ist = input_streams[f->ist_index + j];
1576  enum AVMediaType type = ist->dec_ctx->codec_type;
1577 
1578  total_size += ist->data_size;
1579  total_packets += ist->nb_packets;
1580 
1581  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1582  i, j, media_type_string(type));
1583  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1584  ist->nb_packets, ist->data_size);
1585 
1586  if (ist->decoding_needed) {
1587  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1588  ist->frames_decoded);
1589  if (type == AVMEDIA_TYPE_AUDIO)
1590  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1591  av_log(NULL, AV_LOG_VERBOSE, "; ");
1592  }
1593 
1594  av_log(NULL, AV_LOG_VERBOSE, "\n");
1595  }
1596 
1597  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1598  total_packets, total_size);
1599  }
1600 
1601  for (i = 0; i < nb_output_files; i++) {
1602  OutputFile *of = output_files[i];
1603  uint64_t total_packets = 0, total_size = 0;
1604 
1605  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1606  i, of->ctx->url);
1607 
1608  for (j = 0; j < of->ctx->nb_streams; j++) {
1609  OutputStream *ost = output_streams[of->ost_index + j];
1610  enum AVMediaType type = ost->enc_ctx->codec_type;
1611 
1612  total_size += ost->data_size;
1613  total_packets += ost->packets_written;
1614 
1615  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1616  i, j, media_type_string(type));
1617  if (ost->encoding_needed) {
1618  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1619  ost->frames_encoded);
1620  if (type == AVMEDIA_TYPE_AUDIO)
1621  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1622  av_log(NULL, AV_LOG_VERBOSE, "; ");
1623  }
1624 
1625  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1626  ost->packets_written, ost->data_size);
1627 
1628  av_log(NULL, AV_LOG_VERBOSE, "\n");
1629  }
1630 
1631  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1632  total_packets, total_size);
1633  }
/* nothing at all was written: warn, and hint at trim options unless the
 * run was a two-pass first pass (where an empty output is expected) */
1634  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1635  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1636  if (pass1_used) {
1637  av_log(NULL, AV_LOG_WARNING, "\n");
1638  } else {
1639  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1640  }
1641  }
1642  }
1643 
/* Emit the periodic one-line progress report (frame/fps/q/size/time/bitrate/
 * speed) to stderr or the log, and the machine-readable key=value form to
 * -progress (progress_avio). Rate-limited to one update per 500ms unless
 * is_last_report is set, in which case final stats are also printed.
 * NOTE(review): original lines 1685 (av_bprint_init of buf), 1753 (guard
 * before the end-pts FFMAX), 1801 (guard before the dup/drop line) and 1834
 * (av_log head for the close error) are missing from this extraction. */
1644 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1645 {
1646  AVBPrint buf, buf_script;
1647  OutputStream *ost;
1648  AVFormatContext *oc;
1649  int64_t total_size;
1650  AVCodecContext *enc;
1651  int frame_number, vid, i;
1652  double bitrate;
1653  double speed;
1654  int64_t pts = INT64_MIN + 1;
1655  static int64_t last_time = -1;
1656  static int qp_histogram[52];
1657  int hours, mins, secs, us;
1658  const char *hours_sign;
1659  int ret;
1660  float t;
1661 
1662  if (!print_stats && !is_last_report && !progress_avio)
1663  return;
1664 
/* rate-limit intermediate reports to one per 500ms of wall time */
1665  if (!is_last_report) {
1666  if (last_time == -1) {
1667  last_time = cur_time;
1668  return;
1669  }
1670  if ((cur_time - last_time) < 500000)
1671  return;
1672  last_time = cur_time;
1673  }
1674 
1675  t = (cur_time-timer_start) / 1000000.0;
1676 
1677 
1678  oc = output_files[0]->ctx;
1679 
1680  total_size = avio_size(oc->pb);
1681  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1682  total_size = avio_tell(oc->pb);
1683 
1684  vid = 0;
1686  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1687  for (i = 0; i < nb_output_streams; i++) {
1688  float q = -1;
1689  ost = output_streams[i];
1690  enc = ost->enc_ctx;
1691  if (!ost->stream_copy)
1692  q = ost->quality / (float) FF_QP2LAMBDA;
1693 
/* only the first video stream drives frame/fps; later video streams
 * just append their quantizer */
1694  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1695  av_bprintf(&buf, "q=%2.1f ", q);
1696  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1697  ost->file_index, ost->index, q);
1698  }
1699  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1700  float fps;
1701 
1702  frame_number = ost->frame_number;
1703  fps = t > 1 ? frame_number / t : 0;
1704  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1705  frame_number, fps < 9.95, fps, q);
1706  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1707  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1708  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1709  ost->file_index, ost->index, q);
1710  if (is_last_report)
1711  av_bprintf(&buf, "L");
1712  if (qp_hist) {
1713  int j;
1714  int qp = lrintf(q);
1715  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1716  qp_histogram[qp]++;
1717  for (j = 0; j < 32; j++)
1718  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1719  }
1720 
1721  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1722  int j;
1723  double error, error_sum = 0;
1724  double scale, scale_sum = 0;
1725  double p;
1726  char type[3] = { 'Y','U','V' };
1727  av_bprintf(&buf, "PSNR=");
1728  for (j = 0; j < 3; j++) {
1729  if (is_last_report) {
1730  error = enc->error[j];
1731  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1732  } else {
1733  error = ost->error[j];
1734  scale = enc->width * enc->height * 255.0 * 255.0;
1735  }
/* chroma planes are subsampled: a quarter of the luma area */
1736  if (j)
1737  scale /= 4;
1738  error_sum += error;
1739  scale_sum += scale;
1740  p = psnr(error / scale);
1741  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1742  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1743  ost->file_index, ost->index, type[j] | 32, p);
1744  }
1745  p = psnr(error_sum / scale_sum);
1746  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1747  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1748  ost->file_index, ost->index, p);
1749  }
1750  vid = 1;
1751  }
1752  /* compute min output value */
1754  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1755  ost->st->time_base, AV_TIME_BASE_Q));
1756  if (is_last_report)
1757  nb_frames_drop += ost->last_dropped;
1758  }
1759 
/* split the microsecond pts into sign/h/m/s/us for display */
1760  secs = FFABS(pts) / AV_TIME_BASE;
1761  us = FFABS(pts) % AV_TIME_BASE;
1762  mins = secs / 60;
1763  secs %= 60;
1764  hours = mins / 60;
1765  mins %= 60;
1766  hours_sign = (pts < 0) ? "-" : "";
1767 
1768  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1769  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1770 
1771  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1772  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1773  if (pts == AV_NOPTS_VALUE) {
1774  av_bprintf(&buf, "N/A ");
1775  } else {
1776  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1777  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1778  }
1779 
1780  if (bitrate < 0) {
1781  av_bprintf(&buf, "bitrate=N/A");
1782  av_bprintf(&buf_script, "bitrate=N/A\n");
1783  }else{
1784  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1785  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1786  }
1787 
1788  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1789  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1790  if (pts == AV_NOPTS_VALUE) {
1791  av_bprintf(&buf_script, "out_time_us=N/A\n");
1792  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1793  av_bprintf(&buf_script, "out_time=N/A\n");
1794  } else {
1795  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1796  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1797  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1798  hours_sign, hours, mins, secs, us);
1799  }
1800 
1802  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1803  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1804  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1805 
1806  if (speed < 0) {
1807  av_bprintf(&buf, " speed=N/A");
1808  av_bprintf(&buf_script, "speed=N/A\n");
1809  } else {
1810  av_bprintf(&buf, " speed=%4.3gx", speed);
1811  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1812  }
1813 
1814  if (print_stats || is_last_report) {
/* '\r' keeps intermediate reports on one terminal line; '\n' finalizes */
1815  const char end = is_last_report ? '\n' : '\r';
1816  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1817  fprintf(stderr, "%s %c", buf.str, end);
1818  } else
1819  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1820 
1821  fflush(stderr);
1822  }
1823  av_bprint_finalize(&buf, NULL);
1824 
1825  if (progress_avio) {
1826  av_bprintf(&buf_script, "progress=%s\n",
1827  is_last_report ? "end" : "continue");
1828  avio_write(progress_avio, buf_script.str,
1829  FFMIN(buf_script.len, buf_script.size - 1));
1830  avio_flush(progress_avio);
1831  av_bprint_finalize(&buf_script, NULL);
1832  if (is_last_report) {
1833  if ((ret = avio_closep(&progress_avio)) < 0)
1835  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1836  }
1837  }
1838 
1839  if (is_last_report)
1840  print_final_stats(total_size);
1841 }
1842 
/* NOTE(review): the signature line (original 1843) was lost in this
 * extraction; from the call sites this is the body of
 * static void ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par) -- confirm against upstream.
 * Seeds the filter input parameters from the demuxer's codec parameters
 * when no decoded frame was ever available to derive them from. */
1844 {
1845  // We never got any input. Set a fake format, which will
1846  // come from libavformat.
1847  ifilter->format = par->format;
1848  ifilter->sample_rate = par->sample_rate;
1849  ifilter->channels = par->channels;
1850  ifilter->channel_layout = par->channel_layout;
1851  ifilter->width = par->width;
1852  ifilter->height = par->height;
1853  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1854 }
1855 
/* Flush every encoder at end of input: lazily initialize streams that never
 * received data (configuring their filtergraphs from codec parameters),
 * then send a NULL frame to each encoder and drain all remaining packets
 * into the muxer until AVERROR_EOF.
 * NOTE(review): several original lines are missing from this extraction
 * (1874: av_log head of the "Finishing stream" message; 1886: a guard
 * condition before "continue"; 1909: a guard before "continue", presumably
 * the stopped/finished check; 1932: presumably the update_benchmark(NULL)
 * call; 1965: the video-type guard before do_video_stats). */
1856 static void flush_encoders(void)
1857 {
1858  int i, ret;
1859 
1860  for (i = 0; i < nb_output_streams; i++) {
1861  OutputStream *ost = output_streams[i];
1862  AVCodecContext *enc = ost->enc_ctx;
1863  OutputFile *of = output_files[ost->file_index];
1864 
1865  if (!ost->encoding_needed)
1866  continue;
1867 
1868  // Try to enable encoding with no input frames.
1869  // Maybe we should just let encoding fail instead.
1870  if (!ost->initialized) {
1871  FilterGraph *fg = ost->filter->graph;
1872  char error[1024] = "";
1873 
1875  "Finishing stream %d:%d without any data written to it.\n",
1876  ost->file_index, ost->st->index);
1877 
/* the graph was never configured: derive missing input parameters from
 * the demuxer's codec parameters, then configure it now */
1878  if (ost->filter && !fg->graph) {
1879  int x;
1880  for (x = 0; x < fg->nb_inputs; x++) {
1881  InputFilter *ifilter = fg->inputs[x];
1882  if (ifilter->format < 0)
1883  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1884  }
1885 
1887  continue;
1888 
1889  ret = configure_filtergraph(fg);
1890  if (ret < 0) {
1891  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1892  exit_program(1);
1893  }
1894 
1895  finish_output_stream(ost);
1896  }
1897 
1898  ret = init_output_stream(ost, error, sizeof(error));
1899  if (ret < 0) {
1900  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1901  ost->file_index, ost->index, error);
1902  exit_program(1);
1903  }
1904  }
1905 
/* raw PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1906  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1907  continue;
1908 
1910  continue;
1911 
1912  for (;;) {
1913  const char *desc = NULL;
1914  AVPacket pkt;
1915  int pkt_size;
1916 
1917  switch (enc->codec_type) {
1918  case AVMEDIA_TYPE_AUDIO:
1919  desc = "audio";
1920  break;
1921  case AVMEDIA_TYPE_VIDEO:
1922  desc = "video";
1923  break;
1924  default:
1925  av_assert0(0);
1926  }
1927 
1928  av_init_packet(&pkt);
1929  pkt.data = NULL;
1930  pkt.size = 0;
1931 
1933 
/* drain loop: a NULL frame signals EOF to the encoder; keep receiving
 * until the encoder stops returning EAGAIN */
1934  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1935  ret = avcodec_send_frame(enc, NULL);
1936  if (ret < 0) {
1937  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1938  desc,
1939  av_err2str(ret));
1940  exit_program(1);
1941  }
1942  }
1943 
1944  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1945  if (ret < 0 && ret != AVERROR_EOF) {
1946  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1947  desc,
1948  av_err2str(ret));
1949  exit_program(1);
1950  }
1951  if (ost->logfile && enc->stats_out) {
1952  fprintf(ost->logfile, "%s", enc->stats_out);
1953  }
1954  if (ret == AVERROR_EOF) {
1955  output_packet(of, &pkt, ost, 1);
1956  break;
1957  }
1958  if (ost->finished & MUXER_FINISHED) {
1959  av_packet_unref(&pkt);
1960  continue;
1961  }
1962  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1963  pkt_size = pkt.size;
1964  output_packet(of, &pkt, ost, 0);
1966  do_video_stats(ost, pkt_size);
1967  }
1968  }
1969  }
1970 }
1971 
1972 /*
1973  * Check whether a packet from ist should be written into ost at this time
1974  */
/* NOTE(review): the signature line (original 1975) was lost in this
 * extraction; the comment above and the body indicate this is
 * static int check_output_constraints(InputStream *ist, OutputStream *ost)
 * -- confirm against upstream.
 * Returns 1 if a packet from ist should be written into ost now:
 * ost must be fed by this exact input stream, must not be finished, and
 * the input pts must have reached the output file's start time. */
1976 {
1977  OutputFile *of = output_files[ost->file_index];
1978  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1979 
1980  if (ost->source_index != ist_index)
1981  return 0;
1982 
1983  if (ost->finished)
1984  return 0;
1985 
1986  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1987  return 0;
1988 
1989  return 1;
1990 }
1991 
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start-time/recording-time trimming, rescales timestamps into the
 * muxer time base, and hands the packet to output_packet(). A NULL pkt
 * flushes the output bitstream filters.
 * NOTE(review): original lines 2010 (second half of the leading-keyframe
 * skip condition, presumably the !copy_initial_nonkeyframes check) and 2056
 * (the duration initialization, presumably via
 * av_get_audio_frame_duration()) are missing from this extraction. */
1992 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1993 {
1994  OutputFile *of = output_files[ost->file_index];
1995  InputFile *f = input_files [ist->file_index];
1996  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1997  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1998  AVPacket opkt;
1999 
2000  // EOF: flush output bitstream filters.
2001  if (!pkt) {
2002  av_init_packet(&opkt);
2003  opkt.data = NULL;
2004  opkt.size = 0;
2005  output_packet(of, &opkt, ost, 1);
2006  return;
2007  }
2008 
2009  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2011  return;
2012 
/* drop packets that lie before the effective start point, unless
 * -copypriorss asked to keep them */
2013  if (!ost->frame_number && !ost->copy_prior_start) {
2014  int64_t comp_start = start_time;
2015  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2016  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2017  if (pkt->pts == AV_NOPTS_VALUE ?
2018  ist->pts < comp_start :
2019  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2020  return;
2021  }
2022 
2023  if (of->recording_time != INT64_MAX &&
2024  ist->pts >= of->recording_time + start_time) {
2025  close_output_stream(ost);
2026  return;
2027  }
2028 
2029  if (f->recording_time != INT64_MAX) {
2030  start_time = f->ctx->start_time;
2031  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2032  start_time += f->start_time;
2033  if (ist->pts >= f->recording_time + start_time) {
2034  close_output_stream(ost);
2035  return;
2036  }
2037  }
2038 
2039  /* force the input stream PTS */
2040  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2041  ost->sync_opts++;
2042 
2043  if (av_packet_ref(&opkt, pkt) < 0)
2044  exit_program(1);
2045 
2046  if (pkt->pts != AV_NOPTS_VALUE)
2047  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2048 
2049  if (pkt->dts == AV_NOPTS_VALUE)
2050  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2051  else
2052  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2053  opkt.dts -= ost_tb_start_time;
2054 
/* audio: recompute pts/dts via av_rescale_delta to keep sample-accurate
 * timestamps across rounding of the time-base conversion */
2055  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2057  if(!duration)
2058  duration = ist->dec_ctx->frame_size;
2059  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2060  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2061  ost->mux_timebase) - ost_tb_start_time;
2062  }
2063 
2064  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2065 
2066  output_packet(of, &opkt, ost, 0);
2067 }
2068 
/* NOTE(review): the signature line (original 2069) was lost in this
 * extraction; from the body this is
 * static int guess_input_channel_layout(InputStream *ist) -- confirm
 * against upstream. Original line 2078 is also missing; it presumably
 * assigns a default layout, e.g. via av_get_default_channel_layout().
 * Returns 1 if the decoder has (or could be given) a channel layout,
 * 0 if one could not be guessed (too many channels, or no default). */
2070 {
2071  AVCodecContext *dec = ist->dec_ctx;
2072 
2073  if (!dec->channel_layout) {
2074  char layout_name[256];
2075 
/* do not guess beyond the user-configurable channel-count limit */
2076  if (dec->channels > ist->guess_layout_max)
2077  return 0;
2079  if (!dec->channel_layout)
2080  return 0;
2081  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2082  dec->channels, dec->channel_layout);
2083  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2084  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2085  }
2086  return 1;
2087 }
2088 
/* Update the global decode error counters after a decode attempt and honor
 * -xerror: exit on a decode error, or on a corrupt decoded frame.
 * NOTE(review): original lines 2098-2099 are missing from this extraction;
 * they presumably test the decoded frame's corruption flags
 * (decode_error_flags / AV_FRAME_FLAG_CORRUPT) and contain the av_log head
 * for the "corrupt decoded frame" message -- confirm against upstream. */
2089 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2090 {
/* decode_error_stat[0] counts successes, [1] counts failures */
2091  if (*got_output || ret<0)
2092  decode_error_stat[ret<0] ++;
2093 
2094  if (ret < 0 && exit_on_error)
2095  exit_program(1);
2096 
2097  if (*got_output && ist) {
2100  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2101  if (exit_on_error)
2102  exit_program(1);
2103  }
2104  }
2105 }
2106 
2107 // Filters can be configured only if the formats of all inputs are known.
2109 {
2110  int i;
2111  for (i = 0; i < fg->nb_inputs; i++) {
2112  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2113  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2114  return 0;
2115  }
2116  return 1;
2117 }
2118 
/* NOTE(review): the signature line (original 2119) was lost in this
 * extraction; from the body this is
 * static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
 * -- confirm against upstream. Original line 2186 (the actual push into
 * the graph, presumably av_buffersrc_add_frame_flags()) is also missing.
 * Sends a decoded frame into a filtergraph input, (re)configuring the graph
 * first when the frame's parameters differ from what the graph was built
 * for; frames arriving before all inputs have known formats are queued. */
2120 {
2121  FilterGraph *fg = ifilter->graph;
2122  int need_reinit, ret, i;
2123 
2124  /* determine if the parameters for this input changed */
2125  need_reinit = ifilter->format != frame->format;
2126 
2127  switch (ifilter->ist->st->codecpar->codec_type) {
2128  case AVMEDIA_TYPE_AUDIO:
2129  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2130  ifilter->channels != frame->channels ||
2131  ifilter->channel_layout != frame->channel_layout;
2132  break;
2133  case AVMEDIA_TYPE_VIDEO:
2134  need_reinit |= ifilter->width != frame->width ||
2135  ifilter->height != frame->height;
2136  break;
2137  }
2138 
/* -reinit_filter 0: keep the existing graph despite parameter changes */
2139  if (!ifilter->ist->reinit_filters && fg->graph)
2140  need_reinit = 0;
2141 
/* a change of hw frames context always forces a reinit */
2142  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2143  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2144  need_reinit = 1;
2145 
2146  if (need_reinit) {
2147  ret = ifilter_parameters_from_frame(ifilter, frame);
2148  if (ret < 0)
2149  return ret;
2150  }
2151 
2152  /* (re)init the graph if possible, otherwise buffer the frame and return */
2153  if (need_reinit || !fg->graph) {
2154  for (i = 0; i < fg->nb_inputs; i++) {
2155  if (!ifilter_has_all_input_formats(fg)) {
/* not all inputs known yet: clone the frame into this input's FIFO
 * and try again once the remaining formats are discovered */
2156  AVFrame *tmp = av_frame_clone(frame);
2157  if (!tmp)
2158  return AVERROR(ENOMEM);
2159  av_frame_unref(frame);
2160 
2161  if (!av_fifo_space(ifilter->frame_queue)) {
2162  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2163  if (ret < 0) {
2164  av_frame_free(&tmp);
2165  return ret;
2166  }
2167  }
2168  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2169  return 0;
2170  }
2171  }
2172 
/* drain the old graph before tearing it down and reconfiguring */
2173  ret = reap_filters(1);
2174  if (ret < 0 && ret != AVERROR_EOF) {
2175  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2176  return ret;
2177  }
2178 
2179  ret = configure_filtergraph(fg);
2180  if (ret < 0) {
2181  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2182  return ret;
2183  }
2184  }
2185 
2187  if (ret < 0) {
2188  if (ret != AVERROR_EOF)
2189  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2190  return ret;
2191  }
2192 
2193  return 0;
2194 }
2195 
2196 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2197 {
2198  int ret;
2199 
2200  ifilter->eof = 1;
2201 
2202  if (ifilter->filter) {
2203  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2204  if (ret < 0)
2205  return ret;
2206  } else {
2207  // the filtergraph was never configured
2208  if (ifilter->format < 0)
2209  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2210  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2211  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2212  return AVERROR_INVALIDDATA;
2213  }
2214  }
2215 
2216  return 0;
2217 }
2218 
2219 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2220 // There is the following difference: if you got a frame, you must call
2221 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2222 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2223 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2224 {
2225  int ret;
2226 
2227  *got_frame = 0;
2228 
2229  if (pkt) {
2230  ret = avcodec_send_packet(avctx, pkt);
2231  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2232  // decoded frames with avcodec_receive_frame() until done.
2233  if (ret < 0 && ret != AVERROR_EOF)
2234  return ret;
2235  }
2236 
2237  ret = avcodec_receive_frame(avctx, frame);
2238  if (ret < 0 && ret != AVERROR(EAGAIN))
2239  return ret;
2240  if (ret >= 0)
2241  *got_frame = 1;
2242 
2243  return 0;
2244 }
2245 
/*
 * Push one decoded frame into every filtergraph input attached to 'ist'.
 * All but the last input get a reference to the frame (via filter_frame);
 * the last one consumes the decoded frame itself, avoiding one extra ref.
 * Returns 0 on success or a negative AVERROR (AVERROR_EOF is ignored).
 */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int i, ret;
    AVFrame *f;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
            if (ret < 0)
                break;
        } else
            f = decoded_frame;
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2271 
/*
 * Decode one audio packet (or drain the decoder when pkt is NULL) and send
 * the resulting frame to the attached filtergraphs.
 *
 * *got_output is set if a frame was produced; *decode_failed distinguishes
 * decoder errors from downstream (filtering) errors for the caller.
 * Returns the decode status, or the filtering error if that failed.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the per-stream reusable frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    /* a frame with an invalid sample rate would poison the pts math below */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* pick the best available timestamp source: frame pts, packet pts,
     * or finally the stream's running dts estimate */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* rescale to sample-accurate 1/sample_rate timebase, tracking rounding
     * drift across calls in filter_in_rescale_delta_last */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2333 
/*
 * Decode one video packet (or drain on EOF) and send the frame to the
 * attached filtergraphs.
 *
 * *got_output is set if a frame came out; *duration_pts receives the frame's
 * duration (stream time base) so the caller can advance next_pts;
 * *decode_failed distinguishes decoder errors from downstream errors.
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the dts so it can be recovered as a best-effort timestamp
         * for frames flushed out of the decoder at EOF */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* debug aid: report codec context vs. frame geometry mismatches */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from hw memory if a retrieval hook is installed */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* with a forced input framerate, timestamps are synthesized instead */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* at EOF, fall back to the dts values saved when draining started */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2466 
/*
 * Decode one subtitle packet and dispatch the result: update the sub2video
 * frame, queue it for a not-yet-configured filtergraph, and/or re-encode it
 * to every matching subtitle output stream.
 *
 * With -fix_sub_duration, output of each subtitle is delayed by one event so
 * its display time can be clipped to the start of the next one.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* an empty packet means EOF: flush any pending sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* clip the previous subtitle's duration so it ends when the
             * current one starts (ms units) */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* swap current and previous: the rest of this call processes the
         * previous subtitle, the current one is held back */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, &subtitle);
    } else if (ist->nb_filters) {
        /* filtergraph not configured yet: queue subtitles until it is */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0; /* ownership moved into the queue */
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2543 
2545 {
2546  int i, ret;
2547  /* TODO keep pts also in stream time base to avoid converting back */
2548  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2550 
2551  for (i = 0; i < ist->nb_filters; i++) {
2552  ret = ifilter_send_eof(ist->filters[i], pts);
2553  if (ret < 0)
2554  return ret;
2555  }
2556  return 0;
2557 }
2558 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one input packet for a stream: run it through the decoder(s) when
 * decoding is needed, maintain the stream's pts/dts estimates, send EOF to
 * the filters when the decoders are fully drained, and feed streamcopy
 * outputs. Returns 0 once EOF has been fully processed, nonzero otherwise.
 * no_eof suppresses the filter-EOF signal (used when looping input).
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* start dts before zero to account for decoder delay (B-frames) */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* advance next_dts by the packet duration, or by one frame
                 * period derived from the decoder's framerate */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* prefer the actual frame duration over the dts estimate */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2741 
/*
 * Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Deferred until every output file's
 * header has been written, since the SDP needs final stream parameters.
 */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    /* wait until all muxers are initialized */
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    /* collect only the RTP outputs; others cannot appear in an SDP */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
            avio_closep(&sdp_pb);
            /* free the filename so the SDP is only written once */
            av_freep(&sdp_filename);
        }
    }

fail:
    av_freep(&avc);
}
2786 
/*
 * AVCodecContext.get_format callback: pick the output pixel format for the
 * decoder. Walks the decoder's proposed list; software formats are accepted
 * immediately, hardware formats are accepted only if a matching hwaccel can
 * be initialized (generic hw-device-ctx path or a legacy HWAccel entry).
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* the list is ordered hw-first; the first non-hwaccel entry is the
         * software fallback, accept it */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* look for a decoder hw config using a device context that
             * offers this pixel format */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    /* explicitly requested hwaccel failed: fatal */
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* legacy path: table of hand-written hwaccel glue */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        /* hand a fixed hw frames context to the decoder if one was set up */
        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2872 
2874 {
2875  InputStream *ist = s->opaque;
2876 
2877  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2878  return ist->hwaccel_get_buffer(s, frame, flags);
2879 
2880  return avcodec_default_get_buffer2(s, frame, flags);
2881 }
2882 
/*
 * Open the decoder for one input stream (if decoding is needed), wiring up
 * the get_format/get_buffer callbacks, decoder options and hw device setup.
 * On failure a human-readable message is written to 'error'.
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any decoder option left over was not consumed: abort */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2947 
2949 {
2950  if (ost->source_index >= 0)
2951  return input_streams[ost->source_index];
2952  return NULL;
2953 }
2954 
/* qsort comparator for int64_t values, ordering ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    /* avoids overflow that a plain subtraction could cause */
    return (va > vb) - (va < vb);
}
2959 
/* open the muxer when all the streams are initialized */
/*
 * Write the output file header once every stream of the file is initialized,
 * then print the SDP if requested and flush packets that were queued in the
 * muxing queues while waiting. Returns 0 (also when still waiting) or a
 * negative AVERROR from avformat_write_header().
 */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    /* if any stream is not yet ready, try again later */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
3006 
3008 {
3009  AVBSFContext *ctx;
3010  int i, ret;
3011 
3012  if (!ost->nb_bitstream_filters)
3013  return 0;
3014 
3015  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3016  ctx = ost->bsf_ctx[i];
3017 
3018  ret = avcodec_parameters_copy(ctx->par_in,
3019  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3020  if (ret < 0)
3021  return ret;
3022 
3023  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3024 
3025  ret = av_bsf_init(ctx);
3026  if (ret < 0) {
3027  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3028  ost->bsf_ctx[i]->filter->name);
3029  return ret;
3030  }
3031  }
3032 
3033  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3034  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3035  if (ret < 0)
3036  return ret;
3037 
3038  ost->st->time_base = ctx->time_base_out;
3039 
3040  return 0;
3041 }
3042 
/*
 * Initialize an output stream that is stream-copied (no re-encoding):
 * copy codec parameters, pick a codec tag valid for the target container,
 * and carry over timing, disposition, side data and aspect ratio from the
 * source stream.
 */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    av_assert0(ist && !ost->filter);

    /* run the source parameters through a codec context so that per-stream
     * encoder options (e.g. -tag) are applied */
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        /* keep the source tag only when the target container either has no
         * tag table, maps the source tag back to the same codec id, or has
         * no tag of its own for this codec */
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    /* user forced a rotation: replace the display matrix accordingly */
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* these block_align values are bogus for MP3/AC3 and would confuse
         * the muxer; clear them */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3156 
/*
 * Set the "encoder" metadata tag on an output stream to identify the encoder
 * used (e.g. "Lavc... libx264"). Skipped if the user already set one; the
 * library version is omitted when bitexact output was requested via either
 * format or codec flags.
 */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* respect an explicit user-provided encoder tag */
    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
        return;

    /* options may still live in the dictionaries at this point,
     * so evaluate the flag strings by hand */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL);
}
3197 
/* Parse the -force_key_frames value 'kf': a comma-separated list of
 * timestamps, where an entry starting with "chapters" expands to one
 * keyframe per chapter of the output file (optionally shifted by the
 * suffix after "chapters"). The resulting pts values, rescaled to
 * avctx->time_base, are sorted and stored on ost->forced_kf_pts/_count.
 * The input string is modified in place (commas are NUL-ed out). */
3198 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3199  AVCodecContext *avctx)
3200 {
3201  char *p;
3202  int n = 1, i, size, index = 0;
3203  int64_t t, *pts;
3204 
/* count entries: one more than the number of commas */
3205  for (p = kf; *p; p++)
3206  if (*p == ',')
3207  n++;
3208  size = n;
3209  pts = av_malloc_array(size, sizeof(*pts));
3210  if (!pts) {
3211  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3212  exit_program(1);
3213  }
3214 
3215  p = kf;
3216  for (i = 0; i < n; i++) {
3217  char *next = strchr(p, ',');
3218 
3219  if (next)
3220  *next++ = 0;
3221 
3222  if (!memcmp(p, "chapters", 8)) {
3223 
3224  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3225  int j;
3226 
/* grow the array by (nb_chapters - 1): this entry becomes one pts per chapter */
3227  if (avf->nb_chapters > INT_MAX - size ||
3228  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3229  sizeof(*pts)))) {
/* NOTE(review): the opening of this av_log() call (orig. line 3230,
 * presumably "av_log(NULL, AV_LOG_FATAL,") is missing from this
 * extraction. */
3231  "Could not allocate forced key frames array.\n");
3232  exit_program(1);
3233  }
/* optional offset after "chapters", e.g. "chapters+0.5" */
3234  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3235  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3236 
3237  for (j = 0; j < avf->nb_chapters; j++) {
3238  AVChapter *c = avf->chapters[j];
3239  av_assert1(index < size);
3240  pts[index++] = av_rescale_q(c->start, c->time_base,
3241  avctx->time_base) + t;
3242  }
3243 
3244  } else {
3245 
/* plain timestamp entry */
3246  t = parse_time_or_die("force_key_frames", p, 1);
3247  av_assert1(index < size);
3248  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3249 
3250  }
3251 
3252  p = next;
3253  }
3254 
3255  av_assert0(index == size);
3256  qsort(pts, size, sizeof(*pts), compare_int64);
3257  ost->forced_kf_count = size;
3258  ost->forced_kf_pts = pts;
3259 }
3260 
3261 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3262 {
3263  InputStream *ist = get_input_stream(ost);
3264  AVCodecContext *enc_ctx = ost->enc_ctx;
3265  AVFormatContext *oc;
3266 
3267  if (ost->enc_timebase.num > 0) {
3268  enc_ctx->time_base = ost->enc_timebase;
3269  return;
3270  }
3271 
3272  if (ost->enc_timebase.num < 0) {
3273  if (ist) {
3274  enc_ctx->time_base = ist->st->time_base;
3275  return;
3276  }
3277 
3278  oc = output_files[ost->file_index]->ctx;
3279  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3280  }
3281 
3282  enc_ctx->time_base = default_time_base;
3283 }
3284 
3286 {
3287  InputStream *ist = get_input_stream(ost);
3288  AVCodecContext *enc_ctx = ost->enc_ctx;
3290  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3291  int j, ret;
3292 
3293  set_encoder_id(output_files[ost->file_index], ost);
3294 
3295  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3296  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3297  // which have to be filtered out to prevent leaking them to output files.
3298  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3299 
3300  if (ist) {
3301  ost->st->disposition = ist->st->disposition;
3302 
3303  dec_ctx = ist->dec_ctx;
3304 
3305  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3306  } else {
3307  for (j = 0; j < oc->nb_streams; j++) {
3308  AVStream *st = oc->streams[j];
3309  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3310  break;
3311  }
3312  if (j == oc->nb_streams)
3313  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3316  }
3317 
3318  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3319  if (!ost->frame_rate.num)
3321  if (ist && !ost->frame_rate.num)
3322  ost->frame_rate = ist->framerate;
3323  if (ist && !ost->frame_rate.num)
3324  ost->frame_rate = ist->st->r_frame_rate;
3325  if (ist && !ost->frame_rate.num) {
3326  ost->frame_rate = (AVRational){25, 1};
3328  "No information "
3329  "about the input framerate is available. Falling "
3330  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3331  "if you want a different framerate.\n",
3332  ost->file_index, ost->index);
3333  }
3334 
3335  if (ost->enc->supported_framerates && !ost->force_fps) {
3336  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3337  ost->frame_rate = ost->enc->supported_framerates[idx];
3338  }
3339  // reduce frame rate for mpeg4 to be within the spec limits
3340  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3341  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3342  ost->frame_rate.num, ost->frame_rate.den, 65535);
3343  }
3344  }
3345 
3346  switch (enc_ctx->codec_type) {
3347  case AVMEDIA_TYPE_AUDIO:
3349  if (dec_ctx)
3350  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3351  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3355 
3356  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3357  break;
3358 
3359  case AVMEDIA_TYPE_VIDEO:
3361 
3362  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3364  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3366  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3367  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3368  }
3369  for (j = 0; j < ost->forced_kf_count; j++)
3370  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3372  enc_ctx->time_base);
3373 
3374  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3375  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3376  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3377  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3378  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3380 
3381  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3382  if (dec_ctx)
3383  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3384  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3385 
3386  enc_ctx->framerate = ost->frame_rate;
3387 
3388  ost->st->avg_frame_rate = ost->frame_rate;
3389 
3390  if (!dec_ctx ||
3391  enc_ctx->width != dec_ctx->width ||
3392  enc_ctx->height != dec_ctx->height ||
3393  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3395  }
3396 
3397  if (ost->top_field_first == 0) {
3398  enc_ctx->field_order = AV_FIELD_BB;
3399  } else if (ost->top_field_first == 1) {
3400  enc_ctx->field_order = AV_FIELD_TT;
3401  }
3402 
3403  if (ost->forced_keyframes) {
3404  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3407  if (ret < 0) {
3409  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3410  return ret;
3411  }
3416 
3417  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3418  // parse it only for static kf timings
3419  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3421  }
3422  }
3423  break;
3424  case AVMEDIA_TYPE_SUBTITLE:
3425  enc_ctx->time_base = AV_TIME_BASE_Q;
3426  if (!enc_ctx->width) {
3427  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3428  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3429  }
3430  break;
3431  case AVMEDIA_TYPE_DATA:
3432  break;
3433  default:
3434  abort();
3435  break;
3436  }
3437 
3438  ost->mux_timebase = enc_ctx->time_base;
3439 
3440  return 0;
3441 }
3442 
3443 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3444 {
3445  int ret = 0;
3446 
3447  if (ost->encoding_needed) {
3448  AVCodec *codec = ost->enc;
3449  AVCodecContext *dec = NULL;
3450  InputStream *ist;
3451 
3452  ret = init_output_stream_encode(ost);
3453  if (ret < 0)
3454  return ret;
3455 
3456  if ((ist = get_input_stream(ost)))
3457  dec = ist->dec_ctx;
3458  if (dec && dec->subtitle_header) {
3459  /* ASS code assumes this buffer is null terminated so add extra byte. */
3461  if (!ost->enc_ctx->subtitle_header)
3462  return AVERROR(ENOMEM);
3463  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3465  }
3466  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3467  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3468  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3469  !codec->defaults &&
3470  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3471  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3472  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3473 
3474  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3478  if (!ost->enc_ctx->hw_frames_ctx)
3479  return AVERROR(ENOMEM);
3480  } else {
3481  ret = hw_device_setup_for_encode(ost);
3482  if (ret < 0) {
3483  snprintf(error, error_len, "Device setup failed for "
3484  "encoder on output stream #%d:%d : %s",
3485  ost->file_index, ost->index, av_err2str(ret));
3486  return ret;
3487  }
3488  }
3489  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3490  int input_props = 0, output_props = 0;
3491  AVCodecDescriptor const *input_descriptor =
3493  AVCodecDescriptor const *output_descriptor =
3495  if (input_descriptor)
3496  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3497  if (output_descriptor)
3498  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3499  if (input_props && output_props && input_props != output_props) {
3500  snprintf(error, error_len,
3501  "Subtitle encoding currently only possible from text to text "
3502  "or bitmap to bitmap");
3503  return AVERROR_INVALIDDATA;
3504  }
3505  }
3506 
3507  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3508  if (ret == AVERROR_EXPERIMENTAL)
3509  abort_codec_experimental(codec, 1);
3510  snprintf(error, error_len,
3511  "Error while opening encoder for output stream #%d:%d - "
3512  "maybe incorrect parameters such as bit_rate, rate, width or height",
3513  ost->file_index, ost->index);
3514  return ret;
3515  }
3516  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3517  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3519  ost->enc_ctx->frame_size);
3521  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3522  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3523  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3524  " It takes bits/s as argument, not kbits/s\n");
3525 
3527  if (ret < 0) {
3529  "Error initializing the output stream codec context.\n");
3530  exit_program(1);
3531  }
3532  /*
3533  * FIXME: ost->st->codec should't be needed here anymore.
3534  */
3535  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3536  if (ret < 0)
3537  return ret;
3538 
3539  if (ost->enc_ctx->nb_coded_side_data) {
3540  int i;
3541 
3542  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3543  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3544  uint8_t *dst_data;
3545 
3546  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3547  if (!dst_data)
3548  return AVERROR(ENOMEM);
3549  memcpy(dst_data, sd_src->data, sd_src->size);
3550  }
3551  }
3552 
3553  /*
3554  * Add global input side data. For now this is naive, and copies it
3555  * from the input stream's global side data. All side data should
3556  * really be funneled over AVFrame and libavfilter, then added back to
3557  * packet side data, and then potentially using the first packet for
3558  * global side data.
3559  */
3560  if (ist) {
3561  int i;
3562  for (i = 0; i < ist->st->nb_side_data; i++) {
3563  AVPacketSideData *sd = &ist->st->side_data[i];
3564  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3565  if (!dst)
3566  return AVERROR(ENOMEM);
3567  memcpy(dst, sd->data, sd->size);
3568  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3569  av_display_rotation_set((uint32_t *)dst, 0);
3570  }
3571  }
3572 
3573  // copy timebase while removing common factors
3574  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3575  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3576 
3577  // copy estimated duration as a hint to the muxer
3578  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3579  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3580 
3581  ost->st->codec->codec= ost->enc_ctx->codec;
3582  } else if (ost->stream_copy) {
3583  ret = init_output_stream_streamcopy(ost);
3584  if (ret < 0)
3585  return ret;
3586  }
3587 
3588  // parse user provided disposition, and update stream values
3589  if (ost->disposition) {
3590  static const AVOption opts[] = {
3591  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3592  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3593  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3594  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3595  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3596  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3597  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3598  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3599  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3600  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3601  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3602  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3603  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3604  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3605  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3606  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3607  { NULL },
3608  };
3609  static const AVClass class = {
3610  .class_name = "",
3611  .item_name = av_default_item_name,
3612  .option = opts,
3613  .version = LIBAVUTIL_VERSION_INT,
3614  };
3615  const AVClass *pclass = &class;
3616 
3617  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3618  if (ret < 0)
3619  return ret;
3620  }
3621 
3622  /* initialize bitstream filters for the output stream
3623  * needs to be done here, because the codec id for streamcopy is not
3624  * known until now */
3625  ret = init_output_bsfs(ost);
3626  if (ret < 0)
3627  return ret;
3628 
3629  ost->initialized = 1;
3630 
3631  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3632  if (ret < 0)
3633  return ret;
3634 
3635  return ret;
3636 }
3637 
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial stream discovery of this input file. */
3638 static void report_new_stream(int input_index, AVPacket *pkt)
3639 {
3640  InputFile *file = input_files[input_index];
3641  AVStream *st = file->ctx->streams[pkt->stream_index];
3642 
/* already warned about this stream (or a later one): stay quiet */
3643  if (pkt->stream_index < file->nb_streams_warn)
3644  return;
/* NOTE(review): one argument line of this call (orig. line 3647,
 * presumably the media-type string filling the "%s") is missing from
 * this extraction. */
3645  av_log(file->ctx, AV_LOG_WARNING,
3646  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3648  input_index, pkt->stream_index,
3649  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index we have warned about */
3650  file->nb_streams_warn = pkt->stream_index + 1;
3651 }
3652 
3653 static int transcode_init(void)
3654 {
3655  int ret = 0, i, j, k;
3656  AVFormatContext *oc;
3657  OutputStream *ost;
3658  InputStream *ist;
3659  char error[1024] = {0};
3660 
3661  for (i = 0; i < nb_filtergraphs; i++) {
3662  FilterGraph *fg = filtergraphs[i];
3663  for (j = 0; j < fg->nb_outputs; j++) {
3664  OutputFilter *ofilter = fg->outputs[j];
3665  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3666  continue;
3667  if (fg->nb_inputs != 1)
3668  continue;
3669  for (k = nb_input_streams-1; k >= 0 ; k--)
3670  if (fg->inputs[0]->ist == input_streams[k])
3671  break;
3672  ofilter->ost->source_index = k;
3673  }
3674  }
3675 
3676  /* init framerate emulation */
3677  for (i = 0; i < nb_input_files; i++) {
3678  InputFile *ifile = input_files[i];
3679  if (ifile->rate_emu)
3680  for (j = 0; j < ifile->nb_streams; j++)
3681  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3682  }
3683 
3684  /* init input streams */
3685  for (i = 0; i < nb_input_streams; i++)
3686  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3687  for (i = 0; i < nb_output_streams; i++) {
3688  ost = output_streams[i];
3689  avcodec_close(ost->enc_ctx);
3690  }
3691  goto dump_format;
3692  }
3693 
3694  /* open each encoder */
3695  for (i = 0; i < nb_output_streams; i++) {
3696  // skip streams fed from filtergraphs until we have a frame for them
3697  if (output_streams[i]->filter)
3698  continue;
3699 
3700  ret = init_output_stream(output_streams[i], error, sizeof(error));
3701  if (ret < 0)
3702  goto dump_format;
3703  }
3704 
3705  /* discard unused programs */
3706  for (i = 0; i < nb_input_files; i++) {
3707  InputFile *ifile = input_files[i];
3708  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3709  AVProgram *p = ifile->ctx->programs[j];
3710  int discard = AVDISCARD_ALL;
3711 
3712  for (k = 0; k < p->nb_stream_indexes; k++)
3713  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3714  discard = AVDISCARD_DEFAULT;
3715  break;
3716  }
3717  p->discard = discard;
3718  }
3719  }
3720 
3721  /* write headers for files with no streams */
3722  for (i = 0; i < nb_output_files; i++) {
3723  oc = output_files[i]->ctx;
3724  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3725  ret = check_init_output_file(output_files[i], i);
3726  if (ret < 0)
3727  goto dump_format;
3728  }
3729  }
3730 
3731  dump_format:
3732  /* dump the stream mapping */
3733  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3734  for (i = 0; i < nb_input_streams; i++) {
3735  ist = input_streams[i];
3736 
3737  for (j = 0; j < ist->nb_filters; j++) {
3738  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3739  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3740  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3741  ist->filters[j]->name);
3742  if (nb_filtergraphs > 1)
3743  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3744  av_log(NULL, AV_LOG_INFO, "\n");
3745  }
3746  }
3747  }
3748 
3749  for (i = 0; i < nb_output_streams; i++) {
3750  ost = output_streams[i];
3751 
3752  if (ost->attachment_filename) {
3753  /* an attached file */
3754  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3755  ost->attachment_filename, ost->file_index, ost->index);
3756  continue;
3757  }
3758 
3759  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3760  /* output from a complex graph */
3761  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3762  if (nb_filtergraphs > 1)
3763  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3764 
3765  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3766  ost->index, ost->enc ? ost->enc->name : "?");
3767  continue;
3768  }
3769 
3770  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3771  input_streams[ost->source_index]->file_index,
3772  input_streams[ost->source_index]->st->index,
3773  ost->file_index,
3774  ost->index);
3775  if (ost->sync_ist != input_streams[ost->source_index])
3776  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3777  ost->sync_ist->file_index,
3778  ost->sync_ist->st->index);
3779  if (ost->stream_copy)
3780  av_log(NULL, AV_LOG_INFO, " (copy)");
3781  else {
3782  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3783  const AVCodec *out_codec = ost->enc;
3784  const char *decoder_name = "?";
3785  const char *in_codec_name = "?";
3786  const char *encoder_name = "?";
3787  const char *out_codec_name = "?";
3788  const AVCodecDescriptor *desc;
3789 
3790  if (in_codec) {
3791  decoder_name = in_codec->name;
3792  desc = avcodec_descriptor_get(in_codec->id);
3793  if (desc)
3794  in_codec_name = desc->name;
3795  if (!strcmp(decoder_name, in_codec_name))
3796  decoder_name = "native";
3797  }
3798 
3799  if (out_codec) {
3800  encoder_name = out_codec->name;
3801  desc = avcodec_descriptor_get(out_codec->id);
3802  if (desc)
3803  out_codec_name = desc->name;
3804  if (!strcmp(encoder_name, out_codec_name))
3805  encoder_name = "native";
3806  }
3807 
3808  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3809  in_codec_name, decoder_name,
3810  out_codec_name, encoder_name);
3811  }
3812  av_log(NULL, AV_LOG_INFO, "\n");
3813  }
3814 
3815  if (ret) {
3816  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3817  return ret;
3818  }
3819 
3821 
3822  return 0;
3823 }
3824 
3825 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3826 static int need_output(void)
3827 {
3828  int i;
3829 
3830  for (i = 0; i < nb_output_streams; i++) {
3831  OutputStream *ost = output_streams[i];
3832  OutputFile *of = output_files[ost->file_index];
3833  AVFormatContext *os = output_files[ost->file_index]->ctx;
3834 
3835  if (ost->finished ||
3836  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3837  continue;
3838  if (ost->frame_number >= ost->max_frames) {
3839  int j;
3840  for (j = 0; j < of->ctx->nb_streams; j++)
3841  close_output_stream(output_streams[of->ost_index + j]);
3842  continue;
3843  }
3844 
3845  return 1;
3846  }
3847 
3848  return 0;
3849 }
3850 
3851 /**
3852  * Select the output stream to process.
3853  *
3854  * @return selected output stream, or NULL if none available
3855  */
3857 {
3858  int i;
3859  int64_t opts_min = INT64_MAX;
3860  OutputStream *ost_min = NULL;
3861 
3862  for (i = 0; i < nb_output_streams; i++) {
3863  OutputStream *ost = output_streams[i];
3864  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3865  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3866  AV_TIME_BASE_Q);
3867  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3869  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3870  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3871 
3872  if (!ost->initialized && !ost->inputs_done)
3873  return ost;
3874 
3875  if (!ost->finished && opts < opts_min) {
3876  opts_min = opts;
3877  ost_min = ost->unavailable ? NULL : ost;
3878  }
3879  }
3880  return ost_min;
3881 }
3882 
/* Enable (on != 0) or disable terminal echo on stdin.
 * Compiled out on platforms without termios; silently does nothing when
 * stdin is not a tty (tcgetattr fails). */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3894 
3895 static int check_keyboard_interaction(int64_t cur_time)
3896 {
3897  int i, ret, key;
3898  static int64_t last_time;
3899  if (received_nb_signals)
3900  return AVERROR_EXIT;
3901  /* read_key() returns 0 on EOF */
3902  if(cur_time - last_time >= 100000 && !run_as_daemon){
3903  key = read_key();
3904  last_time = cur_time;
3905  }else
3906  key = -1;
3907  if (key == 'q')
3908  return AVERROR_EXIT;
3909  if (key == '+') av_log_set_level(av_log_get_level()+10);
3910  if (key == '-') av_log_set_level(av_log_get_level()-10);
3911  if (key == 's') qp_hist ^= 1;
3912  if (key == 'h'){
3913  if (do_hex_dump){
3914  do_hex_dump = do_pkt_dump = 0;
3915  } else if(do_pkt_dump){
3916  do_hex_dump = 1;
3917  } else
3918  do_pkt_dump = 1;
3920  }
3921  if (key == 'c' || key == 'C'){
3922  char buf[4096], target[64], command[256], arg[256] = {0};
3923  double time;
3924  int k, n = 0;
3925  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3926  i = 0;
3927  set_tty_echo(1);
3928  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3929  if (k > 0)
3930  buf[i++] = k;
3931  buf[i] = 0;
3932  set_tty_echo(0);
3933  fprintf(stderr, "\n");
3934  if (k > 0 &&
3935  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3936  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3937  target, time, command, arg);
3938  for (i = 0; i < nb_filtergraphs; i++) {
3939  FilterGraph *fg = filtergraphs[i];
3940  if (fg->graph) {
3941  if (time < 0) {
3942  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3943  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3944  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3945  } else if (key == 'c') {
3946  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3947  ret = AVERROR_PATCHWELCOME;
3948  } else {
3949  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3950  if (ret < 0)
3951  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3952  }
3953  }
3954  }
3955  } else {
3957  "Parse error, at least 3 arguments were expected, "
3958  "only %d given in string '%s'\n", n, buf);
3959  }
3960  }
3961  if (key == 'd' || key == 'D'){
3962  int debug=0;
3963  if(key == 'D') {
3964  debug = input_streams[0]->st->codec->debug<<1;
3965  if(!debug) debug = 1;
3966  while(debug & (FF_DEBUG_DCT_COEFF
3967 #if FF_API_DEBUG_MV
3968  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3969 #endif
3970  )) //unsupported, would just crash
3971  debug += debug;
3972  }else{
3973  char buf[32];
3974  int k = 0;
3975  i = 0;
3976  set_tty_echo(1);
3977  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3978  if (k > 0)
3979  buf[i++] = k;
3980  buf[i] = 0;
3981  set_tty_echo(0);
3982  fprintf(stderr, "\n");
3983  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3984  fprintf(stderr,"error parsing debug value\n");
3985  }
3986  for(i=0;i<nb_input_streams;i++) {
3987  input_streams[i]->st->codec->debug = debug;
3988  }
3989  for(i=0;i<nb_output_streams;i++) {
3990  OutputStream *ost = output_streams[i];
3991  ost->enc_ctx->debug = debug;
3992  }
3993  if(debug) av_log_set_level(AV_LOG_DEBUG);
3994  fprintf(stderr,"debug=%d\n", debug);
3995  }
3996  if (key == '?'){
3997  fprintf(stderr, "key function\n"
3998  "? show this help\n"
3999  "+ increase verbosity\n"
4000  "- decrease verbosity\n"
4001  "c Send command to first matching filter supporting it\n"
4002  "C Send/Queue command to all matching filters\n"
4003  "D cycle through available debug modes\n"
4004  "h dump packets/hex press to cycle through the 3 states\n"
4005  "q quit\n"
4006  "s Show QP histogram\n"
4007  );
4008  }
4009  return 0;
4010 }
4011 
4012 #if HAVE_THREADS
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. Read errors
 * (including EOF) are propagated to the receiver side of the queue and end
 * the thread. */
4013 static void *input_thread(void *arg)
4014 {
4015  InputFile *f = arg;
4016  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4017  int ret = 0;
4018 
4019  while (1) {
4020  AVPacket pkt;
4021  ret = av_read_frame(f->ctx, &pkt);
4022 
/* device not ready yet: back off 10ms and retry */
4023  if (ret == AVERROR(EAGAIN)) {
4024  av_usleep(10000);
4025  continue;
4026  }
4027  if (ret < 0) {
4028  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4029  break;
4030  }
4031  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: fall back to one blocking send and warn */
4032  if (flags && ret == AVERROR(EAGAIN)) {
4033  flags = 0;
4034  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* NOTE(review): the opening of this av_log() call (orig. line 4035)
 * is missing from this extraction. */
4036  "Thread message queue blocking; consider raising the "
4037  "thread_queue_size option (current value: %d)\n",
4038  f->thread_queue_size);
4039  }
4040  if (ret < 0) {
4041  if (ret != AVERROR_EOF)
4042  av_log(f->ctx, AV_LOG_ERROR,
4043  "Unable to send packet to main thread: %s\n",
4044  av_err2str(ret));
/* the packet was not consumed by the queue: free it here */
4045  av_packet_unref(&pkt);
4046  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4047  break;
4048  }
4049  }
4050 
4051  return NULL;
4052 }
4053 
/* Stop and join the demuxer thread of input file i, draining and freeing
 * any packets still queued. No-op when no thread/queue was created. */
4054 static void free_input_thread(int i)
4055 {
4056  InputFile *f = input_files[i];
4057  AVPacket pkt;
4058 
4059  if (!f || !f->in_thread_queue)
4060  return;
/* NOTE(review): one line (orig. line 4061) is missing from this
 * extraction here -- presumably the call that signals the sender side
 * (e.g. av_thread_message_queue_set_err_send) so the thread stops
 * producing before we drain; confirm against upstream. */
/* drain all pending packets so their buffers are released */
4062  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4063  av_packet_unref(&pkt);
4064 
4065  pthread_join(f->thread, NULL);
4066  f->joined = 1;
4067  av_thread_message_queue_free(&f->in_thread_queue);
4068 }
4069 
4070 static void free_input_threads(void)
4071 {
4072  int i;
4073 
4074  for (i = 0; i < nb_input_files; i++)
4075  free_input_thread(i);
4076 }
4077 
4078 static int init_input_thread(int i)
4079 {
4080  int ret;
4081  InputFile *f = input_files[i];
4082 
4083  if (nb_input_files == 1)
4084  return 0;
4085 
4086  if (f->ctx->pb ? !f->ctx->pb->seekable :
4087  strcmp(f->ctx->iformat->name, "lavfi"))
4088  f->non_blocking = 1;
4089  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4090  f->thread_queue_size, sizeof(AVPacket));
4091  if (ret < 0)
4092  return ret;
4093 
4094  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4095  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4096  av_thread_message_queue_free(&f->in_thread_queue);
4097  return AVERROR(ret);
4098  }
4099 
4100  return 0;
4101 }
4102 
4103 static int init_input_threads(void)
4104 {
4105  int i, ret;
4106 
4107  for (i = 0; i < nb_input_files; i++) {
4108  ret = init_input_thread(i);
4109  if (ret < 0)
4110  return ret;
4111  }
4112  return 0;
4113 }
4114 
/* Fetch the next packet of input file f from its demuxer-thread queue,
 * non-blocking when the file was marked non_blocking. */
4115 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4116 {
/* NOTE(review): the final line of this expression (orig. line 4119,
 * presumably "AV_THREAD_MESSAGE_NONBLOCK : 0);") is missing from this
 * extraction -- confirm against upstream. */
4117  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4118  f->non_blocking ?
4120 }
4121 #endif
4122 
/* get_input_packet(): fetch the next packet for input file f, honouring
 * -re rate emulation (returns EAGAIN while the stream's dts is still
 * ahead of wall-clock time). Uses the thread queue when more than one
 * input file exists, otherwise reads directly.
 * NOTE(review): the signature line (orig. line 4123) is missing from this
 * extraction; from usage it appears to be
 * (InputFile *f, AVPacket *pkt) -- confirm against upstream. */
4124 {
4125  if (f->rate_emu) {
4126  int i;
4127  for (i = 0; i < f->nb_streams; i++) {
4128  InputStream *ist = input_streams[f->ist_index + i];
/* stream position in microseconds vs. elapsed wall-clock time */
4129  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4130  int64_t now = av_gettime_relative() - ist->start;
4131  if (pts > now)
4132  return AVERROR(EAGAIN);
4133  }
4134  }
4135 
4136 #if HAVE_THREADS
4137  if (nb_input_files > 1)
4138  return get_input_packet_mt(f, pkt);
4139 #endif
4140  return av_read_frame(f->ctx, pkt);
4141 }
4142 
4143 static int got_eagain(void)
4144 {
4145  int i;
4146  for (i = 0; i < nb_output_streams; i++)
4147  if (output_streams[i]->unavailable)
4148  return 1;
4149  return 0;
4150 }
4151 
4152 static void reset_eagain(void)
4153 {
4154  int i;
4155  for (i = 0; i < nb_input_files; i++)
4156  input_files[i]->eagain = 0;
4157  for (i = 0; i < nb_output_streams; i++)
4158  output_streams[i]->unavailable = 0;
4159 }
4160 
4161 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4162 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4163  AVRational time_base)
4164 {
4165  int ret;
4166 
4167  if (!*duration) {
4168  *duration = tmp;
4169  return tmp_time_base;
4170  }
4171 
4172  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4173  if (ret < 0) {
4174  *duration = tmp;
4175  return tmp_time_base;
4176  }
4177 
4178  return time_base;
4179 }
4180 
/* seek_to_start(): rewind an input file for -stream_loop. Seeks back to
 * the start time and updates ifile->duration / ifile->time_base with the
 * duration played so far (per-stream max_pts - min_pts plus an estimate
 * of the last frame's length), so looped timestamps can be offset.
 * Decrements ifile->loop when it is positive.
 * NOTE(review): the signature line (orig. line 4181) is missing from this
 * extraction; from usage it appears to be
 * (InputFile *ifile, AVFormatContext *is) -- confirm against upstream. */
4182 {
4183  InputStream *ist;
4184  AVCodecContext *avctx;
4185  int i, ret, has_audio = 0;
4186  int64_t duration = 0;
4187 
4188  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4189  if (ret < 0)
4190  return ret;
4191 
/* first pass: detect whether any audio stream produced samples */
4192  for (i = 0; i < ifile->nb_streams; i++) {
4193  ist = input_streams[ifile->ist_index + i];
4194  avctx = ist->dec_ctx;
4195 
4196  /* duration is the length of the last frame in a stream
4197  * when audio stream is present we don't care about
4198  * last video frame length because it's not defined exactly */
4199  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4200  has_audio = 1;
4201  }
4202 
/* second pass: estimate the last-frame duration per stream and fold it
 * into the file-level duration via duration_max() */
4203  for (i = 0; i < ifile->nb_streams; i++) {
4204  ist = input_streams[ifile->ist_index + i];
4205  avctx = ist->dec_ctx;
4206 
4207  if (has_audio) {
4208  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4209  AVRational sample_rate = {1, avctx->sample_rate};
4210 
4211  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4212  } else {
4213  continue;
4214  }
4215  } else {
/* video-only: one frame duration from the best available frame rate */
4216  if (ist->framerate.num) {
4217  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4218  } else if (ist->st->avg_frame_rate.num) {
4219  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4220  } else {
4221  duration = 1;
4222  }
4223  }
4224  if (!ifile->duration)
4225  ifile->time_base = ist->st->time_base;
4226  /* the total duration of the stream, max_pts - min_pts is
4227  * the duration of the stream without the last frame */
4228  duration += ist->max_pts - ist->min_pts;
4229  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4230  ifile->time_base);
4231  }
4232 
4233  if (ifile->loop > 0)
4234  ifile->loop--;
4235 
4236  return ret;
4237 }
4238 
4239 /*
4240  * Return
4241  * - 0 -- one packet was read and processed
4242  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4243  * this function should be called again
4244  * - AVERROR_EOF -- this function should not be called again
4245  */
4246 static int process_input(int file_index)
4247 {
4248  InputFile *ifile = input_files[file_index];
4250  InputStream *ist;
4251  AVPacket pkt;
4252  int ret, thread_ret, i, j;
4253  int64_t duration;
4254  int64_t pkt_dts;
4255 
4256  is = ifile->ctx;
4257  ret = get_input_packet(ifile, &pkt);
4258 
4259  if (ret == AVERROR(EAGAIN)) {
4260  ifile->eagain = 1;
4261  return ret;
4262  }
4263  if (ret < 0 && ifile->loop) {
4264  AVCodecContext *avctx;
4265  for (i = 0; i < ifile->nb_streams; i++) {
4266  ist = input_streams[ifile->ist_index + i];
4267  avctx = ist->dec_ctx;
4268  if (ist->decoding_needed) {
4269  ret = process_input_packet(ist, NULL, 1);
4270  if (ret>0)
4271  return 0;
4272  avcodec_flush_buffers(avctx);
4273  }
4274  }
4275 #if HAVE_THREADS
4276  free_input_thread(file_index);
4277 #endif
4278  ret = seek_to_start(ifile, is);
4279 #if HAVE_THREADS
4280  thread_ret = init_input_thread(file_index);
4281  if (thread_ret < 0)
4282  return thread_ret;
4283 #endif
4284  if (ret < 0)
4285  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4286  else
4287  ret = get_input_packet(ifile, &pkt);
4288  if (ret == AVERROR(EAGAIN)) {
4289  ifile->eagain = 1;
4290  return ret;
4291  }
4292  }
4293  if (ret < 0) {
4294  if (ret != AVERROR_EOF) {
4295  print_error(is->url, ret);
4296  if (exit_on_error)
4297  exit_program(1);
4298  }
4299 
4300  for (i = 0; i < ifile->nb_streams; i++) {
4301  ist = input_streams[ifile->ist_index + i];
4302  if (ist->decoding_needed) {
4303  ret = process_input_packet(ist, NULL, 0);
4304  if (ret>0)
4305  return 0;
4306  }
4307 
4308  /* mark all outputs that don't go through lavfi as finished */
4309  for (j = 0; j < nb_output_streams; j++) {
4310  OutputStream *ost = output_streams[j];
4311 
4312  if (ost->source_index == ifile->ist_index + i &&
4313  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4314  finish_output_stream(ost);
4315  }
4316  }
4317 
4318  ifile->eof_reached = 1;
4319  return AVERROR(EAGAIN);
4320  }
4321 
4322  reset_eagain();
4323 
4324  if (do_pkt_dump) {
4326  is->streams[pkt.stream_index]);
4327  }
4328  /* the following test is needed in case new streams appear
4329  dynamically in stream : we ignore them */
4330  if (pkt.stream_index >= ifile->nb_streams) {
4331  report_new_stream(file_index, &pkt);
4332  goto discard_packet;
4333  }
4334 
4335  ist = input_streams[ifile->ist_index + pkt.stream_index];
4336 
4337  ist->data_size += pkt.size;
4338  ist->nb_packets++;
4339 
4340  if (ist->discard)
4341  goto discard_packet;
4342 
4343  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4345  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4346  if (exit_on_error)
4347  exit_program(1);
4348  }
4349 
4350  if (debug_ts) {
4351  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4352  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4356  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4357  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4358  av_ts2str(input_files[ist->file_index]->ts_offset),
4359  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4360  }
4361 
4362  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4363  int64_t stime, stime2;
4364  // Correcting starttime based on the enabled streams
4365  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4366  // so we instead do it here as part of discontinuity handling
4367  if ( ist->next_dts == AV_NOPTS_VALUE
4368  && ifile->ts_offset == -is->start_time
4369  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4370  int64_t new_start_time = INT64_MAX;
4371  for (i=0; i<is->nb_streams; i++) {
4372  AVStream *st = is->streams[i];
4373  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4374  continue;
4375  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4376  }
4377  if (new_start_time > is->start_time) {
4378  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4379  ifile->ts_offset = -new_start_time;
4380  }
4381  }
4382 
4383  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4384  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4385  ist->wrap_correction_done = 1;
4386 
4387  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4388  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4389  ist->wrap_correction_done = 0;
4390  }
4391  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4392  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4393  ist->wrap_correction_done = 0;
4394  }
4395  }
4396 
4397  /* add the stream-global side data to the first packet */
4398  if (ist->nb_packets == 1) {
4399  for (i = 0; i < ist->st->nb_side_data; i++) {
4400  AVPacketSideData *src_sd = &ist->st->side_data[i];
4401  uint8_t *dst_data;
4402 
4403  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4404  continue;
4405 
4406  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4407  continue;
4408 
4409  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4410  if (!dst_data)
4411  exit_program(1);
4412 
4413  memcpy(dst_data, src_sd->data, src_sd->size);
4414  }
4415  }
4416 
4417  if (pkt.dts != AV_NOPTS_VALUE)
4418  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4419  if (pkt.pts != AV_NOPTS_VALUE)
4420  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4421 
4422  if (pkt.pts != AV_NOPTS_VALUE)
4423  pkt.pts *= ist->ts_scale;
4424  if (pkt.dts != AV_NOPTS_VALUE)
4425  pkt.dts *= ist->ts_scale;
4426 
4428  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4430  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4431  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4432  int64_t delta = pkt_dts - ifile->last_ts;
4433  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4434  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4435  ifile->ts_offset -= delta;
4437  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4438  delta, ifile->ts_offset);
4439  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4440  if (pkt.pts != AV_NOPTS_VALUE)
4441  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4442  }
4443  }
4444 
4445  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4446  if (pkt.pts != AV_NOPTS_VALUE) {
4447  pkt.pts += duration;
4448  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4449  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4450  }
4451 
4452  if (pkt.dts != AV_NOPTS_VALUE)
4453  pkt.dts += duration;
4454 
4456  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4458  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4459  !copy_ts) {
4460  int64_t delta = pkt_dts - ist->next_dts;
4461  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4462  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4463  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4464  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4465  ifile->ts_offset -= delta;
4467  "timestamp discontinuity for stream #%d:%d "
4468  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4469  ist->file_index, ist->st->index, ist->st->id,
4471  delta, ifile->ts_offset);
4472  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4473  if (pkt.pts != AV_NOPTS_VALUE)
4474  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4475  }
4476  } else {
4477  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4478  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4479  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4480  pkt.dts = AV_NOPTS_VALUE;
4481  }
4482  if (pkt.pts != AV_NOPTS_VALUE){
4483  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4484  delta = pkt_pts - ist->next_dts;
4485  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4486  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4487  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4488  pkt.pts = AV_NOPTS_VALUE;
4489  }
4490  }
4491  }
4492  }
4493 
4494  if (pkt.dts != AV_NOPTS_VALUE)
4495  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4496 
4497  if (debug_ts) {
4498  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4500  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4501  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4502  av_ts2str(input_files[ist->file_index]->ts_offset),
4503  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4504  }
4505 
4506  sub2video_heartbeat(ist, pkt.pts);
4507 
4508  process_input_packet(ist, &pkt, 0);
4509 
4510 discard_packet:
4511  av_packet_unref(&pkt);
4512 
4513  return 0;
4514 }
4515 
4516 /**
4517  * Perform a step of transcoding for the specified filter graph.
4518  *
4519  * @param[in] graph filter graph to consider
4520  * @param[out] best_ist input stream where a frame would allow to continue
4521  * @return 0 for success, <0 for error
4522  */
4523 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4524 {
4525  int i, ret;
4526  int nb_requests, nb_requests_max = 0;
4527  InputFilter *ifilter;
4528  InputStream *ist;
4529 
4530  *best_ist = NULL;
4531  ret = avfilter_graph_request_oldest(graph->graph);
4532  if (ret >= 0)
4533  return reap_filters(0);
4534 
4535  if (ret == AVERROR_EOF) {
4536  ret = reap_filters(1);
4537  for (i = 0; i < graph->nb_outputs; i++)
4538  close_output_stream(graph->outputs[i]->ost);
4539  return ret;
4540  }
4541  if (ret != AVERROR(EAGAIN))
4542  return ret;
4543 
4544  for (i = 0; i < graph->nb_inputs; i++) {
4545  ifilter = graph->inputs[i];
4546  ist = ifilter->ist;
4547  if (input_files[ist->file_index]->eagain ||
4548  input_files[ist->file_index]->eof_reached)
4549  continue;
4550  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4551  if (nb_requests > nb_requests_max) {
4552  nb_requests_max = nb_requests;
4553  *best_ist = ist;
4554  }
4555  }
4556 
4557  if (!*best_ist)
4558  for (i = 0; i < graph->nb_outputs; i++)
4559  graph->outputs[i]->ost->unavailable = 1;
4560 
4561  return 0;
4562 }