FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity reported by the generic cmdutils banner/version code. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Destination for per-frame video statistics (-vstats / -vstats_file);
   opened elsewhere, closed in ffmpeg_cleanup(). */
static FILE *vstats_file;

/* Variable names usable inside a -force_key_frames expression.
   NOTE(review): order presumably mirrors the values array built where the
   expression is evaluated — confirm against the evaluation site. */
const char *const forced_keyframes_const_names[] = {
    "n",              /* frame number */
    "n_forced",       /* number of frames forced so far */
    "prev_forced_n",  /* frame number of the previous forced frame */
    "prev_forced_t",  /* time of the previous forced frame */
    "t",              /* current time */
    NULL
};
122 
/**
 * Snapshot of wall-clock and per-process CPU time, in microseconds,
 * used by the -benchmark / -benchmark_all reporting (see update_benchmark()).
 *
 * Fix: the closing "} BenchmarkTimeStamps;" of this typedef was lost in
 * extraction, leaving the struct (and the typedef name) unterminated.
 */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;  /* wall-clock time */
    int64_t user_usec;  /* user-mode CPU time */
    int64_t sys_usec;   /* kernel-mode CPU time */
} BenchmarkTimeStamps;
/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;        /* nonzero disables interactive tty handling
                                        -- NOTE(review): set elsewhere, not visible here */
static int nb_frames_dup = 0;        /* frames duplicated by video sync (see do_video_out) */
static unsigned dup_warning = 1000;  /* next nb_frames_dup threshold that triggers a warning */
static int nb_frames_drop = 0;       /* frames dropped by video sync */
static int64_t decode_error_stat[2]; /* decode outcome counters
                                        -- NOTE(review): index meaning not visible here; confirm */

static int want_sdp = 1;

/* NOTE(review): several declarations from the original file (e.g. the
   get_benchmark_time_stamps() prototype and globals such as current_time
   and subtitle_out, which are referenced below) were dropped by the
   extraction of this dump. */
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* terminal state saved by term_init() */
static int restore_tty;         /* nonzero once oldtty holds a valid saved state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif

/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
/* end of sub2video hack */

/* Restore the terminal state saved by term_init().  Also called from the
   signal handler, so it must remain async-signal-safe: tcsetattr() only,
   no logging or allocation. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
327 
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
static volatile int received_sigterm = 0;    /* last fatal signal number, set in sigterm_handler() */
static volatile int received_nb_signals = 0; /* count of fatal signals received so far */
/* NOTE(review): the transcode_init_done atomic referenced later in this
   file appears to have been dropped from this extraction. */
static volatile int ffmpeg_exited = 0;       /* set at the end of ffmpeg_cleanup(); CtrlHandler() polls it */
static int main_return_code = 0;             /* overridden to 1 on muxing errors (see write_packet) */
339 
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: route console events through the same
   sigterm_handler() path as POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);  /* yield until ffmpeg_cleanup() sets ffmpeg_exited */
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
386 
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
/* read a key without blocking */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set readable;
    int n;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    timeout.tv_sec  = 0;
    timeout.tv_usec = 0;
    n = select(1, &readable, NULL, NULL, &timeout);
    if (n > 0) {
        n = read(0, &ch, 1);
        /* one byte read -> the key; otherwise propagate read()'s result */
        return n == 1 ? ch : n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown, registered to run at exit: frees all filtergraphs
   (including frames/subtitles still queued at their inputs), closes and
   frees every output/input file and stream, and restores the terminal.
   `ret` is the pending exit status, used only for the final log message.
   NOTE(review): several statements were dropped from this extraction;
   the gaps are marked below and the visible code is preserved as-is. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): a statement (likely freeing fg->graph) is missing here */
        for (j = 0; j < fg->nb_inputs; j++) {
            /* drain frames still queued for this filtergraph input */
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): the av_fifo_generic_read(...) opener of
                       this call is missing from the extraction */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                /* NOTE(review): freeing of the sub_queue fifo is missing here */
            }
            /* NOTE(review): a statement is missing here */
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE(review): the declaration of `s` (AVFormatContext *) is
           missing from this extraction */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        /* NOTE(review): a statement (likely freeing the format context)
           is missing here */
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        if (ost->muxing_queue) {
            /* unref packets that never reached the muxer */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            /* NOTE(review): freeing of the muxing_queue fifo is missing here */
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): statements are missing here (extraction gap) */
        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log(...) opener of this message is
               missing from the extraction */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;  /* releases the stall loop in CtrlHandler() */
}
635 
637 {
638  AVDictionaryEntry *t = NULL;
639 
640  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
642  }
643 }
644 
646 {
648  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
650  exit_program(1);
651  }
652 }
653 
/* Terminate the program when an experimental codec was requested without
   the required strictness setting.  The parameters identify the offending
   codec and direction but are unused in this version. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
658 
659 static void update_benchmark(const char *fmt, ...)
660 {
661  if (do_benchmark_all) {
663  va_list va;
664  char buf[1024];
665 
666  if (fmt) {
667  va_start(va, fmt);
668  vsnprintf(buf, sizeof(buf), fmt, va);
669  va_end(va);
671  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672  t.user_usec - current_time.user_usec,
673  t.sys_usec - current_time.sys_usec,
674  t.real_usec - current_time.real_usec, buf);
675  }
676  current_time = t;
677  }
678 }
679 
681 {
682  int i;
683  for (i = 0; i < nb_output_streams; i++) {
684  OutputStream *ost2 = output_streams[i];
685  ost2->finished |= ost == ost2 ? this_stream : others;
686  }
687 }
688 
/* Hand one packet to the muxer for `ost`.  If the muxer header has not
   been written yet, the packet is buffered in ost->muxing_queue instead.
   Also enforces -frames limits, fixes up non-monotonic/invalid timestamps
   and extracts video quality stats from packet side data.  `unqueue` is
   nonzero when the packet was previously buffered, so it is not counted
   against the frame limit a second time.
   NOTE(review): several lines were dropped from this extraction; the gaps
   are marked below and the visible code is preserved as-is. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue geometrically up to max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): the av_log(...) opener of this message is
                   missing from the extraction */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* NOTE(review): the condition guarding this timestamp reset is missing
       from the extraction */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the av_packet_get_side_data(...) line declaring
           `sd` is missing from the extraction */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* replace both by the median of {pts, dts, last_mux_dts + 1} */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the first line of this condition (a media-type
           check) is missing from the extraction */
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict formats require strictly increasing DTS, others allow equal */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type string argument line is
                  missing from the extraction */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
        );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): a statement (likely closing all output streams)
           is missing here */
    }
    av_packet_unref(pkt);
}
823 
825 {
826  OutputFile *of = output_files[ost->file_index];
827 
828  ost->finished |= ENCODER_FINISHED;
829  if (of->shortest) {
830  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831  of->recording_time = FFMIN(of->recording_time, end);
832  }
833 }
834 
835 /*
836  * Send a single packet to the output, applying any bitstream filters
837  * associated with the output stream. This may result in any number
838  * of packets actually being written, depending on what bitstream
839  * filters are applied. The supplied packet is consumed and will be
840  * blank (as if newly-allocated) when this function returns.
841  *
842  * If eof is set, instead indicate EOF to all bitstream filters and
843  * therefore flush any delayed packets to the output. A blank packet
844  * must be supplied in this case.
845  */
847  OutputStream *ost, int eof)
848 {
849  int ret = 0;
850 
851  /* apply the output bitstream filters, if any */
852  if (ost->nb_bitstream_filters) {
853  int idx;
854 
855  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
856  if (ret < 0)
857  goto finish;
858 
859  eof = 0;
860  idx = 1;
861  while (idx) {
862  /* get a packet from the previous filter up the chain */
863  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864  if (ret == AVERROR(EAGAIN)) {
865  ret = 0;
866  idx--;
867  continue;
868  } else if (ret == AVERROR_EOF) {
869  eof = 1;
870  } else if (ret < 0)
871  goto finish;
872 
873  /* send it to the next filter down the chain or to the muxer */
874  if (idx < ost->nb_bitstream_filters) {
875  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
878  idx++;
879  eof = 0;
880  } else if (eof)
881  goto finish;
882  else
883  write_packet(of, pkt, ost, 0);
884  }
885  } else if (!eof)
886  write_packet(of, pkt, ost, 0);
887 
888 finish:
889  if (ret < 0 && ret != AVERROR_EOF) {
890  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
892  if(exit_on_error)
893  exit_program(1);
894  }
895 }
896 
898 {
899  OutputFile *of = output_files[ost->file_index];
900 
901  if (of->recording_time != INT64_MAX &&
903  AV_TIME_BASE_Q) >= 0) {
904  close_output_stream(ost);
905  return 0;
906  }
907  return 1;
908 }
909 
911  AVFrame *frame)
912 {
913  AVCodecContext *enc = ost->enc_ctx;
914  AVPacket pkt;
915  int ret;
916 
917  av_init_packet(&pkt);
918  pkt.data = NULL;
919  pkt.size = 0;
920 
921  if (!check_recording_time(ost))
922  return;
923 
924  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925  frame->pts = ost->sync_opts;
926  ost->sync_opts = frame->pts + frame->nb_samples;
927  ost->samples_encoded += frame->nb_samples;
928  ost->frames_encoded++;
929 
930  av_assert0(pkt.size || !pkt.data);
932  if (debug_ts) {
933  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936  enc->time_base.num, enc->time_base.den);
937  }
938 
939  ret = avcodec_send_frame(enc, frame);
940  if (ret < 0)
941  goto error;
942 
943  while (1) {
944  ret = avcodec_receive_packet(enc, &pkt);
945  if (ret == AVERROR(EAGAIN))
946  break;
947  if (ret < 0)
948  goto error;
949 
950  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
951 
952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
953 
954  if (debug_ts) {
955  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
959  }
960 
961  output_packet(of, &pkt, ost, 0);
962  }
963 
964  return;
965 error:
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968 }
969 
/* Encode one AVSubtitle for `ost` and send the packet(s) to the muxer.
   DVB subtitles are encoded twice: once to draw and once (with zero
   rects) to clear.  Timestamps are shifted by the output start time so
   -ss/-t behave as expected.
   NOTE(review): `subtitle_out` is presumably a file-level buffer shared
   across calls; its declaration is not visible in this extraction. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* num_rects is zeroed for the DVB "clear" pass and restored after */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1052 
1053 static void do_video_out(OutputFile *of,
1054  OutputStream *ost,
1055  AVFrame *next_picture,
1056  double sync_ipts)
1057 {
1058  int ret, format_video_sync;
1059  AVPacket pkt;
1060  AVCodecContext *enc = ost->enc_ctx;
1061  AVCodecParameters *mux_par = ost->st->codecpar;
1062  AVRational frame_rate;
1063  int nb_frames, nb0_frames, i;
1064  double delta, delta0;
1065  double duration = 0;
1066  int frame_size = 0;
1067  InputStream *ist = NULL;
1069 
1070  if (ost->source_index >= 0)
1071  ist = input_streams[ost->source_index];
1072 
1073  frame_rate = av_buffersink_get_frame_rate(filter);
1074  if (frame_rate.num > 0 && frame_rate.den > 0)
1075  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1076 
1077  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1079 
1080  if (!ost->filters_script &&
1081  !ost->filters &&
1082  next_picture &&
1083  ist &&
1084  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1086  }
1087 
1088  if (!next_picture) {
1089  //end, flushing
1090  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091  ost->last_nb0_frames[1],
1092  ost->last_nb0_frames[2]);
1093  } else {
1094  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095  delta = delta0 + duration;
1096 
1097  /* by default, we output a single frame */
1098  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1099  nb_frames = 1;
1100 
1101  format_video_sync = video_sync_method;
1102  if (format_video_sync == VSYNC_AUTO) {
1103  if(!strcmp(of->ctx->oformat->name, "avi")) {
1104  format_video_sync = VSYNC_VFR;
1105  } else
1106  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1107  if ( ist
1108  && format_video_sync == VSYNC_CFR
1109  && input_files[ist->file_index]->ctx->nb_streams == 1
1110  && input_files[ist->file_index]->input_ts_offset == 0) {
1111  format_video_sync = VSYNC_VSCFR;
1112  }
1113  if (format_video_sync == VSYNC_CFR && copy_ts) {
1114  format_video_sync = VSYNC_VSCFR;
1115  }
1116  }
1117  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1118 
1119  if (delta0 < 0 &&
1120  delta > 0 &&
1121  format_video_sync != VSYNC_PASSTHROUGH &&
1122  format_video_sync != VSYNC_DROP) {
1123  if (delta0 < -0.6) {
1124  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1125  } else
1126  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127  sync_ipts = ost->sync_opts;
1128  duration += delta0;
1129  delta0 = 0;
1130  }
1131 
1132  switch (format_video_sync) {
1133  case VSYNC_VSCFR:
1134  if (ost->frame_number == 0 && delta0 >= 0.5) {
1135  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1136  delta = duration;
1137  delta0 = 0;
1138  ost->sync_opts = lrint(sync_ipts);
1139  }
1140  case VSYNC_CFR:
1141  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1143  nb_frames = 0;
1144  } else if (delta < -1.1)
1145  nb_frames = 0;
1146  else if (delta > 1.1) {
1147  nb_frames = lrintf(delta);
1148  if (delta0 > 1.1)
1149  nb0_frames = lrintf(delta0 - 0.6);
1150  }
1151  break;
1152  case VSYNC_VFR:
1153  if (delta <= -0.6)
1154  nb_frames = 0;
1155  else if (delta > 0.6)
1156  ost->sync_opts = lrint(sync_ipts);
1157  break;
1158  case VSYNC_DROP:
1159  case VSYNC_PASSTHROUGH:
1160  ost->sync_opts = lrint(sync_ipts);
1161  break;
1162  default:
1163  av_assert0(0);
1164  }
1165  }
1166 
1167  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168  nb0_frames = FFMIN(nb0_frames, nb_frames);
1169 
1170  memmove(ost->last_nb0_frames + 1,
1171  ost->last_nb0_frames,
1172  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173  ost->last_nb0_frames[0] = nb0_frames;
1174 
1175  if (nb0_frames == 0 && ost->last_dropped) {
1176  nb_frames_drop++;
1178  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179  ost->frame_number, ost->st->index, ost->last_frame->pts);
1180  }
1181  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1182  if (nb_frames > dts_error_threshold * 30) {
1183  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1184  nb_frames_drop++;
1185  return;
1186  }
1187  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189  if (nb_frames_dup > dup_warning) {
1190  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1191  dup_warning *= 10;
1192  }
1193  }
1194  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1195 
1196  /* duplicates frame if needed */
1197  for (i = 0; i < nb_frames; i++) {
1198  AVFrame *in_picture;
1199  av_init_packet(&pkt);
1200  pkt.data = NULL;
1201  pkt.size = 0;
1202 
1203  if (i < nb0_frames && ost->last_frame) {
1204  in_picture = ost->last_frame;
1205  } else
1206  in_picture = next_picture;
1207 
1208  if (!in_picture)
1209  return;
1210 
1211  in_picture->pts = ost->sync_opts;
1212 
1213 #if 1
1214  if (!check_recording_time(ost))
1215 #else
1216  if (ost->frame_number >= ost->max_frames)
1217 #endif
1218  return;
1219 
1220  {
1221  int forced_keyframe = 0;
1222  double pts_time;
1223 
1225  ost->top_field_first >= 0)
1226  in_picture->top_field_first = !!ost->top_field_first;
1227 
1228  if (in_picture->interlaced_frame) {
1229  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231  else
1232  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233  } else
1234  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 
1236  in_picture->quality = enc->global_quality;
1237  in_picture->pict_type = 0;
1238 
1239  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1240  in_picture->pts != AV_NOPTS_VALUE)
1241  ost->forced_kf_ref_pts = in_picture->pts;
1242 
1243  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1244  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1245  if (ost->forced_kf_index < ost->forced_kf_count &&
1246  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1247  ost->forced_kf_index++;
1248  forced_keyframe = 1;
1249  } else if (ost->forced_keyframes_pexpr) {
1250  double res;
1251  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1254  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1260  res);
1261  if (res) {
1262  forced_keyframe = 1;
1268  }
1269 
1271  } else if ( ost->forced_keyframes
1272  && !strncmp(ost->forced_keyframes, "source", 6)
1273  && in_picture->key_frame==1) {
1274  forced_keyframe = 1;
1275  }
1276 
1277  if (forced_keyframe) {
1278  in_picture->pict_type = AV_PICTURE_TYPE_I;
1279  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1280  }
1281 
1283  if (debug_ts) {
1284  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1285  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1286  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1287  enc->time_base.num, enc->time_base.den);
1288  }
1289 
1290  ost->frames_encoded++;
1291 
1292  ret = avcodec_send_frame(enc, in_picture);
1293  if (ret < 0)
1294  goto error;
1295 
1296  while (1) {
1297  ret = avcodec_receive_packet(enc, &pkt);
1298  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1299  if (ret == AVERROR(EAGAIN))
1300  break;
1301  if (ret < 0)
1302  goto error;
1303 
1304  if (debug_ts) {
1305  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1308  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1309  }
1310 
1311  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1312  pkt.pts = ost->sync_opts;
1313 
1314  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1315 
1316  if (debug_ts) {
1317  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1318  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1319  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1320  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1321  }
1322 
1323  frame_size = pkt.size;
1324  output_packet(of, &pkt, ost, 0);
1325 
1326  /* if two pass, output log */
1327  if (ost->logfile && enc->stats_out) {
1328  fprintf(ost->logfile, "%s", enc->stats_out);
1329  }
1330  }
1331  }
1332  ost->sync_opts++;
1333  /*
1334  * For video, number of frames in == number of packets out.
1335  * But there may be reordering, so we can't throw away frames on encoder
1336  * flush, we need to limit them here, before they go into encoder.
1337  */
1338  ost->frame_number++;
1339 
1340  if (vstats_filename && frame_size)
1341  do_video_stats(ost, frame_size);
1342  }
1343 
1344  if (!ost->last_frame)
1345  ost->last_frame = av_frame_alloc();
1346  av_frame_unref(ost->last_frame);
1347  if (next_picture && ost->last_frame)
1348  av_frame_ref(ost->last_frame, next_picture);
1349  else
1350  av_frame_free(&ost->last_frame);
1351 
1352  return;
1353 error:
1354  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1355  exit_program(1);
1356 }
1357 
1358 static double psnr(double d)
1359 {
1360  return -10.0 * log10(d);
1361 }
1362 
{
    /* Append one line of per-frame statistics for a video stream to the
     * -vstats file. NOTE(review): the function signature is not visible in
     * this excerpt; call sites pass (ost, frame_size). */
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 2 additionally identifies the output file and stream */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* error[0] holds the luma SSE accumulated by the encoder when PSNR
         * reporting is enabled; normalize by pixel count and peak^2 */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp so avg_bitrate below never divides by ~0 */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1405 
1406 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1407 
{
    /* Mark a stream as finished; with -shortest, finishing one stream
     * finishes every stream of the same output file.
     * NOTE(review): the signature and the line marking ost itself as
     * finished (between 'int i;' and the if) are missing from this
     * excerpt — verify against the full source. */
    OutputFile *of = output_files[ost->file_index];
    int i;


    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1420 
1421 /**
1422  * Get and encode new output from any of the filtergraphs, without causing
1423  * activity.
1424  *
1425  * @return 0 for success, <0 for severe errors
1426  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of 'filter' (AVFilterContext *) is
         * missing from this excerpt. */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* Lazily initialize the output stream the first time its filtergraph
         * produces output; a failure here is fatal for the whole program. */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* ost->filtered_frame is reused across calls to avoid per-frame allocation */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NOTE(review): the trailing flags argument of this call is
             * missing from this excerpt. */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log(...) opener for this message
                     * is missing from this excerpt. */
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NOTE(review): a media-type guard line is missing here
                     * in this excerpt; on flush, EOF triggers a final NULL
                     * frame to drain the video encoder. */
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator to keep extra fractional
                 * precision in float_pts without overflowing the rescale */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                           av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                           float_pts,
                           enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): the av_log(...) opener for this message
                     * is missing from this excerpt. */
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1532 
static void print_final_stats(int64_t total_size)
{
    /* Print the end-of-run summary: per-type byte totals, muxing overhead,
     * and verbose per-stream demux/mux statistics. */
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0; /* negative means overhead could not be computed */
    int i, j;
    int pass1_used = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the condition guarding this assignment (checking the
         * PASS1/PASS2 encoder flags) is missing from this excerpt. */
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw coded payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0; /* shadows the parameter intentionally */

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* An all-zero total usually means nothing was encoded at all; hint at
     * the trimming options unless this was a deliberate first pass. */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1649 
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    /* Emit the periodic status line (frame/fps/q/size/time/bitrate/speed)
     * to stderr and, when -progress is active, a machine-readable report to
     * progress_avio. Throttled to at most one update per 500ms unless this
     * is the final report. */
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;   /* persists across calls for throttling */
    static int qp_histogram[52];     /* accumulated -qphist counters */
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        /* rate-limit intermediate reports to one per 500ms of wall clock */
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the av_bprint_init(&buf, ...) line is missing from this
     * excerpt; only buf_script's init is visible. */
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* only the first video stream gets the full frame/fps block; later
         * video streams just append their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* cumulative error over all frames on the last report */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4; /* chroma planes are quarter-size (4:2:0 assumption) */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): the guard checking av_stream_get_end_pts() against
         * AV_NOPTS_VALUE is missing from this excerpt. */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): a condition guarding the dup/drop output (printed only
     * when frames were duplicated or dropped) is missing from this excerpt. */
    av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        /* cap at size-1 so a truncated bprint buffer is not over-read */
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            /* NOTE(review): the av_log(...) opener for this message is
             * missing from this excerpt. */
            if ((ret = avio_closep(&progress_avio)) < 0)
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1848 
{
    /* Seed the input filter's media parameters from the demuxer-provided
     * codec parameters when no decoded frame was ever available. */
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1861 
static void flush_encoders(void)
{
    /* Drain every active encoder at end of input: send a NULL frame and
     * pull out all remaining packets until EOF. Streams that never received
     * data are initialized first so the muxer still gets valid headers. */
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log(...) opener for this message is
             * missing from this excerpt. */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* fall back to demuxer parameters for inputs whose format
                 * was never negotiated */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the condition guarding this 'continue'
                 * (likely the all-input-formats check) is missing from
                 * this excerpt. */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* raw PCM-style audio encoders (frame_size <= 1) buffer nothing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        /* NOTE(review): the media-type condition guarding this 'continue'
         * is missing from this excerpt. */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): an update_benchmark(NULL) call appears to be
             * missing from this excerpt at this point. */

            /* keep feeding the flush (NULL) frame until the encoder stops
             * asking for more input */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* final call with eof=1 flushes bitstream filters / muxer */
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            /* NOTE(review): the condition guarding this stats call (video
             * type + vstats_filename) is missing from this excerpt. */
                do_video_stats(ost, pkt_size);
        }
    }
}
1977 
1978 /*
1979  * Check whether a packet from ist should be written into ost at this time
1980  */
1982 {
1983  OutputFile *of = output_files[ost->file_index];
1984  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1985 
1986  if (ost->source_index != ist_index)
1987  return 0;
1988 
1989  if (ost->finished)
1990  return 0;
1991 
1992  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1993  return 0;
1994 
1995  return 1;
1996 }
1997 
{
    /* Copy a demuxed packet straight to the output (-c copy): rescale its
     * timestamps into the muxer timebase, honor start/recording-time trims,
     * and reference (not copy) the payload. A NULL pkt flushes the output
     * bitstream filters. */
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt = { 0 };

    av_init_packet(&opkt);

    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* drop non-key leading packets unless the user asked to keep them;
     * NOTE(review): the second half of this condition
     * (!ost->copy_initial_nonkeyframes) is missing from this excerpt. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        /* compare against pkt->pts when available, else the stream clock */
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output-level -t reached: stop this stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* input-level -t reached: stop this stream */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration/initialization of 'duration' and
         * the middle arguments of av_rescale_delta() are missing from this
         * excerpt. */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;

    /* share the payload by reference; abort on allocation failure */
    if (pkt->buf) {
        opkt.buf = av_buffer_ref(pkt->buf);
        if (!opkt.buf)
            exit_program(1);
    }
    opkt.data = pkt->data;
    opkt.size = pkt->size;

    av_copy_packet_side_data(&opkt, pkt);

    output_packet(of, &opkt, ost, 0);
}
2084 
{
    /* If the decoder reported no channel layout, try to pick one and warn;
     * returns 0 when no usable layout can be determined, 1 otherwise. */
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* refuse to guess beyond the user-set -guess_layout_max limit */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): the line assigning a default layout (between these
         * two checks) is missing from this excerpt; without it the second
         * check decides whether a guess succeeded. */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2104 
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* Track decode success/failure statistics and, with -xerror, abort on
     * any decode error or corrupt decoded frame. */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++; /* index 0 = success, 1 = error */

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
        /* NOTE(review): an inner 'if' checking the decoded frame's
         * corruption flags is missing from this excerpt (explains the extra
         * closing brace below). */
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
            exit_program(1);
        }
    }
}
2120 
2121 // Filters can be configured only if the formats of all inputs are known.
2123 {
2124  int i;
2125  for (i = 0; i < fg->nb_inputs; i++) {
2126  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2127  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2128  return 0;
2129  }
2130  return 1;
2131 }
2132 
{
    /* Feed one decoded frame into its input filter. If the frame's
     * parameters differ from what the graph was built for, record the new
     * parameters and reconfigure; frames arriving before all sibling inputs
     * have known formats are queued instead of sent. */
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* clone into the FIFO; the caller's frame is emptied */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                /* grow the queue geometrically when full */
                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* drain pending output before tearing the graph down */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the av_buffersrc_add_frame_flags() call that sets 'ret'
     * here is missing from this excerpt. */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2205 
2206 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2207 {
2208  int ret;
2209 
2210  ifilter->eof = 1;
2211 
2212  if (ifilter->filter) {
2213  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2214  if (ret < 0)
2215  return ret;
2216  } else {
2217  // the filtergraph was never configured
2218  if (ifilter->format < 0)
2219  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2220  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2221  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2222  return AVERROR_INVALIDDATA;
2223  }
2224  }
2225 
2226  return 0;
2227 }
2228 
2229 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2230 // There is the following difference: if you got a frame, you must call
2231 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2232 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2233 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2234 {
2235  int ret;
2236 
2237  *got_frame = 0;
2238 
2239  if (pkt) {
2240  ret = avcodec_send_packet(avctx, pkt);
2241  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2242  // decoded frames with avcodec_receive_frame() until done.
2243  if (ret < 0 && ret != AVERROR_EOF)
2244  return ret;
2245  }
2246 
2247  ret = avcodec_receive_frame(avctx, frame);
2248  if (ret < 0 && ret != AVERROR(EAGAIN))
2249  return ret;
2250  if (ret >= 0)
2251  *got_frame = 1;
2252 
2253  return 0;
2254 }
2255 
2256 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2257 {
2258  int i, ret;
2259  AVFrame *f;
2260 
2261  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2262  for (i = 0; i < ist->nb_filters; i++) {
2263  if (i < ist->nb_filters - 1) {
2264  f = ist->filter_frame;
2265  ret = av_frame_ref(f, decoded_frame);
2266  if (ret < 0)
2267  break;
2268  } else
2269  f = decoded_frame;
2270  ret = ifilter_send_frame(ist->filters[i], f);
2271  if (ret == AVERROR_EOF)
2272  ret = 0; /* ignore */
2273  if (ret < 0) {
2275  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2276  break;
2277  }
2278  }
2279  return ret;
2280 }
2281 
/**
 * Decode one audio packet (or drain the decoder when pkt carries no data),
 * fix up the frame's pts, update the stream's predicted next pts/dts, and
 * forward the decoded frame to the filtergraphs.
 *
 * @param ist           input stream owning the decoder
 * @param pkt           packet to decode, or NULL to fetch more output
 * @param got_output    set nonzero when a frame was produced
 * @param decode_failed set nonzero when the decoder itself errored
 * @return decoder/filter error code, or >= 0 on success
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    // lazily allocate the per-stream scratch frames
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    /* NOTE(review): one line appears dropped here in this copy of the file
     * (upstream calls update_benchmark(NULL) to reset the timer) — confirm. */
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // a decoder reporting a non-positive sample rate produced unusable output
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    // choose the best available timestamp source for the frame, together with
    // the time base it is expressed in
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    // rescale to a 1/sample_rate time base, spreading rounding error across
    // samples via filter_in_rescale_delta_last
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2345 
2346 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2347  int *decode_failed)
2348 {
2349  AVFrame *decoded_frame;
2350  int i, ret = 0, err = 0;
2351  int64_t best_effort_timestamp;
2352  int64_t dts = AV_NOPTS_VALUE;
2353  AVPacket avpkt;
2354 
2355  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2356  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2357  // skip the packet.
2358  if (!eof && pkt && pkt->size == 0)
2359  return 0;
2360 
2361  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2362  return AVERROR(ENOMEM);
2363  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2364  return AVERROR(ENOMEM);
2365  decoded_frame = ist->decoded_frame;
2366  if (ist->dts != AV_NOPTS_VALUE)
2367  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2368  if (pkt) {
2369  avpkt = *pkt;
2370  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2371  }
2372 
2373  // The old code used to set dts on the drain packet, which does not work
2374  // with the new API anymore.
2375  if (eof) {
2376  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2377  if (!new)
2378  return AVERROR(ENOMEM);
2379  ist->dts_buffer = new;
2380  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2381  }
2382 
2384  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2385  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2386  if (ret < 0)
2387  *decode_failed = 1;
2388 
2389  // The following line may be required in some cases where there is no parser
2390  // or the parser does not has_b_frames correctly
2391  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2392  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2393  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2394  } else
2396  "video_delay is larger in decoder than demuxer %d > %d.\n"
2397  "If you want to help, upload a sample "
2398  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2399  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2400  ist->dec_ctx->has_b_frames,
2401  ist->st->codecpar->video_delay);
2402  }
2403 
2404  if (ret != AVERROR_EOF)
2405  check_decode_result(ist, got_output, ret);
2406 
2407  if (*got_output && ret >= 0) {
2408  if (ist->dec_ctx->width != decoded_frame->width ||
2409  ist->dec_ctx->height != decoded_frame->height ||
2410  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2411  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2412  decoded_frame->width,
2413  decoded_frame->height,
2414  decoded_frame->format,
2415  ist->dec_ctx->width,
2416  ist->dec_ctx->height,
2417  ist->dec_ctx->pix_fmt);
2418  }
2419  }
2420 
2421  if (!*got_output || ret < 0)
2422  return ret;
2423 
2424  if(ist->top_field_first>=0)
2425  decoded_frame->top_field_first = ist->top_field_first;
2426 
2427  ist->frames_decoded++;
2428 
2429  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2430  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2431  if (err < 0)
2432  goto fail;
2433  }
2434  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2435 
2436  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2437  *duration_pts = decoded_frame->pkt_duration;
2438 
2439  if (ist->framerate.num)
2440  best_effort_timestamp = ist->cfr_next_pts++;
2441 
2442  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2443  best_effort_timestamp = ist->dts_buffer[0];
2444 
2445  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2446  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2447  ist->nb_dts_buffer--;
2448  }
2449 
2450  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2451  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2452 
2453  if (ts != AV_NOPTS_VALUE)
2454  ist->next_pts = ist->pts = ts;
2455  }
2456 
2457  if (debug_ts) {
2458  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2459  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2460  ist->st->index, av_ts2str(decoded_frame->pts),
2461  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2462  best_effort_timestamp,
2463  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2464  decoded_frame->key_frame, decoded_frame->pict_type,
2465  ist->st->time_base.num, ist->st->time_base.den);
2466  }
2467 
2468  if (ist->st->sample_aspect_ratio.num)
2469  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2470 
2471  err = send_frame_to_filters(ist, decoded_frame);
2472 
2473 fail:
2475  av_frame_unref(decoded_frame);
2476  return err < 0 ? err : ret;
2477 }
2478 
2479 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2480  int *decode_failed)
2481 {
2482  AVSubtitle subtitle;
2483  int free_sub = 1;
2484  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2485  &subtitle, got_output, pkt);
2486 
2487  check_decode_result(NULL, got_output, ret);
2488 
2489  if (ret < 0 || !*got_output) {
2490  *decode_failed = 1;
2491  if (!pkt->size)
2492  sub2video_flush(ist);
2493  return ret;
2494  }
2495 
2496  if (ist->fix_sub_duration) {
2497  int end = 1;
2498  if (ist->prev_sub.got_output) {
2499  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2500  1000, AV_TIME_BASE);
2501  if (end < ist->prev_sub.subtitle.end_display_time) {
2502  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2503  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2505  end <= 0 ? ", dropping it" : "");
2507  }
2508  }
2509  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2510  FFSWAP(int, ret, ist->prev_sub.ret);
2511  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2512  if (end <= 0)
2513  goto out;
2514  }
2515 
2516  if (!*got_output)
2517  return ret;
2518 
2519  if (ist->sub2video.frame) {
2520  sub2video_update(ist, &subtitle);
2521  } else if (ist->nb_filters) {
2522  if (!ist->sub2video.sub_queue)
2523  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2524  if (!ist->sub2video.sub_queue)
2525  exit_program(1);
2526  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2528  if (ret < 0)
2529  exit_program(1);
2530  }
2531  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2532  free_sub = 0;
2533  }
2534 
2535  if (!subtitle.num_rects)
2536  goto out;
2537 
2538  ist->frames_decoded++;
2539 
2540  for (i = 0; i < nb_output_streams; i++) {
2541  OutputStream *ost = output_streams[i];
2542 
2543  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2544  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2545  continue;
2546 
2547  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2548  }
2549 
2550 out:
2551  if (free_sub)
2552  avsubtitle_free(&subtitle);
2553  return ret;
2554 }
2555 
2557 {
2558  int i, ret;
2559  /* TODO keep pts also in stream time base to avoid converting back */
2560  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2562 
2563  for (i = 0; i < ist->nb_filters; i++) {
2564  ret = ifilter_send_eof(ist->filters[i], pts);
2565  if (ret < 0)
2566  return ret;
2567  }
2568  return 0;
2569 }
2570 
2571 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2572 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2573 {
2574  int ret = 0, i;
2575  int repeating = 0;
2576  int eof_reached = 0;
2577 
2578  AVPacket avpkt;
2579  if (!ist->saw_first_ts) {
2580  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2581  ist->pts = 0;
2582  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2583  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2584  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2585  }
2586  ist->saw_first_ts = 1;
2587  }
2588 
2589  if (ist->next_dts == AV_NOPTS_VALUE)
2590  ist->next_dts = ist->dts;
2591  if (ist->next_pts == AV_NOPTS_VALUE)
2592  ist->next_pts = ist->pts;
2593 
2594  if (!pkt) {
2595  /* EOF handling */
2596  av_init_packet(&avpkt);
2597  avpkt.data = NULL;
2598  avpkt.size = 0;
2599  } else {
2600  avpkt = *pkt;
2601  }
2602 
2603  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2604  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2605  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2606  ist->next_pts = ist->pts = ist->dts;
2607  }
2608 
2609  // while we have more to decode or while the decoder did output something on EOF
2610  while (ist->decoding_needed) {
2611  int64_t duration_dts = 0;
2612  int64_t duration_pts = 0;
2613  int got_output = 0;
2614  int decode_failed = 0;
2615 
2616  ist->pts = ist->next_pts;
2617  ist->dts = ist->next_dts;
2618 
2619  switch (ist->dec_ctx->codec_type) {
2620  case AVMEDIA_TYPE_AUDIO:
2621  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622  &decode_failed);
2623  break;
2624  case AVMEDIA_TYPE_VIDEO:
2625  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2626  &decode_failed);
2627  if (!repeating || !pkt || got_output) {
2628  if (pkt && pkt->duration) {
2629  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2632  duration_dts = ((int64_t)AV_TIME_BASE *
2633  ist->dec_ctx->framerate.den * ticks) /
2635  }
2636 
2637  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2638  ist->next_dts += duration_dts;
2639  }else
2640  ist->next_dts = AV_NOPTS_VALUE;
2641  }
2642 
2643  if (got_output) {
2644  if (duration_pts > 0) {
2645  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2646  } else {
2647  ist->next_pts += duration_dts;
2648  }
2649  }
2650  break;
2651  case AVMEDIA_TYPE_SUBTITLE:
2652  if (repeating)
2653  break;
2654  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2655  if (!pkt && ret >= 0)
2656  ret = AVERROR_EOF;
2657  break;
2658  default:
2659  return -1;
2660  }
2661 
2662  if (ret == AVERROR_EOF) {
2663  eof_reached = 1;
2664  break;
2665  }
2666 
2667  if (ret < 0) {
2668  if (decode_failed) {
2669  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2670  ist->file_index, ist->st->index, av_err2str(ret));
2671  } else {
2672  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2673  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2674  }
2675  if (!decode_failed || exit_on_error)
2676  exit_program(1);
2677  break;
2678  }
2679 
2680  if (got_output)
2681  ist->got_output = 1;
2682 
2683  if (!got_output)
2684  break;
2685 
2686  // During draining, we might get multiple output frames in this loop.
2687  // ffmpeg.c does not drain the filter chain on configuration changes,
2688  // which means if we send multiple frames at once to the filters, and
2689  // one of those frames changes configuration, the buffered frames will
2690  // be lost. This can upset certain FATE tests.
2691  // Decode only 1 frame per call on EOF to appease these FATE tests.
2692  // The ideal solution would be to rewrite decoding to use the new
2693  // decoding API in a better way.
2694  if (!pkt)
2695  break;
2696 
2697  repeating = 1;
2698  }
2699 
2700  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2701  /* except when looping we need to flush but not to send an EOF */
2702  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2703  int ret = send_filter_eof(ist);
2704  if (ret < 0) {
2705  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2706  exit_program(1);
2707  }
2708  }
2709 
2710  /* handle stream copy */
2711  if (!ist->decoding_needed && pkt) {
2712  ist->dts = ist->next_dts;
2713  switch (ist->dec_ctx->codec_type) {
2714  case AVMEDIA_TYPE_AUDIO:
2715  av_assert1(pkt->duration >= 0);
2716  if (ist->dec_ctx->sample_rate) {
2717  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2718  ist->dec_ctx->sample_rate;
2719  } else {
2720  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721  }
2722  break;
2723  case AVMEDIA_TYPE_VIDEO:
2724  if (ist->framerate.num) {
2725  // TODO: Remove work-around for c99-to-c89 issue 7
2726  AVRational time_base_q = AV_TIME_BASE_Q;
2727  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2728  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2729  } else if (pkt->duration) {
2730  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2731  } else if(ist->dec_ctx->framerate.num != 0) {
2732  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2733  ist->next_dts += ((int64_t)AV_TIME_BASE *
2734  ist->dec_ctx->framerate.den * ticks) /
2736  }
2737  break;
2738  }
2739  ist->pts = ist->dts;
2740  ist->next_pts = ist->next_dts;
2741  }
2742  for (i = 0; i < nb_output_streams; i++) {
2743  OutputStream *ost = output_streams[i];
2744 
2745  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746  continue;
2747 
2748  do_streamcopy(ist, ost, pkt);
2749  }
2750 
2751  return !eof_reached;
2752 }
2753 
/**
 * Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to the file given by -sdp_file. Does nothing until
 * every output file has written its header.
 */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    // defer until all muxers are initialized, so codec parameters are final
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    // collect only the RTP outputs; j counts how many were found
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
            avio_closep(&sdp_pb);
            /* NOTE(review): one line appears dropped here in this copy —
             * upstream frees sdp_filename (av_freep) after writing; confirm. */
        }
    }

fail:
    av_freep(&avc);
}
2798 
2800 {
2801  InputStream *ist = s->opaque;
2802  const enum AVPixelFormat *p;
2803  int ret;
2804 
2805  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2807  const AVCodecHWConfig *config = NULL;
2808  int i;
2809 
2810  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2811  break;
2812 
2813  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2814  ist->hwaccel_id == HWACCEL_AUTO) {
2815  for (i = 0;; i++) {
2816  config = avcodec_get_hw_config(s->codec, i);
2817  if (!config)
2818  break;
2819  if (!(config->methods &
2821  continue;
2822  if (config->pix_fmt == *p)
2823  break;
2824  }
2825  }
2826  if (config) {
2827  if (config->device_type != ist->hwaccel_device_type) {
2828  // Different hwaccel offered, ignore.
2829  continue;
2830  }
2831 
2832  ret = hwaccel_decode_init(s);
2833  if (ret < 0) {
2834  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2836  "%s hwaccel requested for input stream #%d:%d, "
2837  "but cannot be initialized.\n",
2839  ist->file_index, ist->st->index);
2840  return AV_PIX_FMT_NONE;
2841  }
2842  continue;
2843  }
2844  } else {
2845  const HWAccel *hwaccel = NULL;
2846  int i;
2847  for (i = 0; hwaccels[i].name; i++) {
2848  if (hwaccels[i].pix_fmt == *p) {
2849  hwaccel = &hwaccels[i];
2850  break;
2851  }
2852  }
2853  if (!hwaccel) {
2854  // No hwaccel supporting this pixfmt.
2855  continue;
2856  }
2857  if (hwaccel->id != ist->hwaccel_id) {
2858  // Does not match requested hwaccel.
2859  continue;
2860  }
2861 
2862  ret = hwaccel->init(s);
2863  if (ret < 0) {
2865  "%s hwaccel requested for input stream #%d:%d, "
2866  "but cannot be initialized.\n", hwaccel->name,
2867  ist->file_index, ist->st->index);
2868  return AV_PIX_FMT_NONE;
2869  }
2870  }
2871 
2872  if (ist->hw_frames_ctx) {
2874  if (!s->hw_frames_ctx)
2875  return AV_PIX_FMT_NONE;
2876  }
2877 
2878  ist->hwaccel_pix_fmt = *p;
2879  break;
2880  }
2881 
2882  return *p;
2883 }
2884 
2886 {
2887  InputStream *ist = s->opaque;
2888 
2889  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2890  return ist->hwaccel_get_buffer(s, frame, flags);
2891 
2892  return avcodec_default_get_buffer2(s, frame, flags);
2893 }
2894 
2895 static int init_input_stream(int ist_index, char *error, int error_len)
2896 {
2897  int ret;
2898  InputStream *ist = input_streams[ist_index];
2899 
2900  if (ist->decoding_needed) {
2901  AVCodec *codec = ist->dec;
2902  if (!codec) {
2903  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2904  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2905  return AVERROR(EINVAL);
2906  }
2907 
2908  ist->dec_ctx->opaque = ist;
2909  ist->dec_ctx->get_format = get_format;
2910  ist->dec_ctx->get_buffer2 = get_buffer;
2911  ist->dec_ctx->thread_safe_callbacks = 1;
2912 
2913  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2914  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2915  (ist->decoding_needed & DECODING_FOR_OST)) {
2916  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2918  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2919  }
2920 
2921  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2922 
2923  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2924  * audio, and video decoders such as cuvid or mediacodec */
2925  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2926 
2927  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2928  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2929  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2931  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2932 
2933  ret = hw_device_setup_for_decode(ist);
2934  if (ret < 0) {
2935  snprintf(error, error_len, "Device setup failed for "
2936  "decoder on input stream #%d:%d : %s",
2937  ist->file_index, ist->st->index, av_err2str(ret));
2938  return ret;
2939  }
2940 
2941  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2942  if (ret == AVERROR_EXPERIMENTAL)
2943  abort_codec_experimental(codec, 0);
2944 
2945  snprintf(error, error_len,
2946  "Error while opening decoder for input stream "
2947  "#%d:%d : %s",
2948  ist->file_index, ist->st->index, av_err2str(ret));
2949  return ret;
2950  }
2952  }
2953 
2954  ist->next_pts = AV_NOPTS_VALUE;
2955  ist->next_dts = AV_NOPTS_VALUE;
2956 
2957  return 0;
2958 }
2959 
2961 {
2962  if (ost->source_index >= 0)
2963  return input_streams[ost->source_index];
2964  return NULL;
2965 }
2966 
/* qsort() comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    /* sign of (lhs - rhs) without overflow risk */
    return (lhs > rhs) - (lhs < rhs);
}
2971 
2972 /* open the muxer when all the streams are initialized */
2973 static int check_init_output_file(OutputFile *of, int file_index)
2974 {
2975  int ret, i;
2976 
2977  for (i = 0; i < of->ctx->nb_streams; i++) {
2978  OutputStream *ost = output_streams[of->ost_index + i];
2979  if (!ost->initialized)
2980  return 0;
2981  }
2982 
2983  of->ctx->interrupt_callback = int_cb;
2984 
2985  ret = avformat_write_header(of->ctx, &of->opts);
2986  if (ret < 0) {
2988  "Could not write header for output file #%d "
2989  "(incorrect codec parameters ?): %s\n",
2990  file_index, av_err2str(ret));
2991  return ret;
2992  }
2993  //assert_avoptions(of->opts);
2994  of->header_written = 1;
2995 
2996  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2997 
2998  if (sdp_filename || want_sdp)
2999  print_sdp();
3000 
3001  /* flush the muxing queues */
3002  for (i = 0; i < of->ctx->nb_streams; i++) {
3003  OutputStream *ost = output_streams[of->ost_index + i];
3004 
3005  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3006  if (!av_fifo_size(ost->muxing_queue))
3007  ost->mux_timebase = ost->st->time_base;
3008 
3009  while (av_fifo_size(ost->muxing_queue)) {
3010  AVPacket pkt;
3011  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3012  write_packet(of, &pkt, ost, 1);
3013  }
3014  }
3015 
3016  return 0;
3017 }
3018 
3020 {
3021  AVBSFContext *ctx;
3022  int i, ret;
3023 
3024  if (!ost->nb_bitstream_filters)
3025  return 0;
3026 
3027  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3028  ctx = ost->bsf_ctx[i];
3029 
3030  ret = avcodec_parameters_copy(ctx->par_in,
3031  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3032  if (ret < 0)
3033  return ret;
3034 
3035  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3036 
3037  ret = av_bsf_init(ctx);
3038  if (ret < 0) {
3039  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3040  ost->bsf_ctx[i]->filter->name);
3041  return ret;
3042  }
3043  }
3044 
3045  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3046  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3047  if (ret < 0)
3048  return ret;
3049 
3050  ost->st->time_base = ctx->time_base_out;
3051 
3052  return 0;
3053 }
3054 
3056 {
3057  OutputFile *of = output_files[ost->file_index];
3058  InputStream *ist = get_input_stream(ost);
3059  AVCodecParameters *par_dst = ost->st->codecpar;
3060  AVCodecParameters *par_src = ost->ref_par;
3061  AVRational sar;
3062  int i, ret;
3063  uint32_t codec_tag = par_dst->codec_tag;
3064 
3065  av_assert0(ist && !ost->filter);
3066 
3067  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3068  if (ret >= 0)
3069  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3070  if (ret < 0) {
3072  "Error setting up codec context options.\n");
3073  return ret;
3074  }
3076 
3077  if (!codec_tag) {
3078  unsigned int codec_tag_tmp;
3079  if (!of->ctx->oformat->codec_tag ||
3080  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3081  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3082  codec_tag = par_src->codec_tag;
3083  }
3084 
3085  ret = avcodec_parameters_copy(par_dst, par_src);
3086  if (ret < 0)
3087  return ret;
3088 
3089  par_dst->codec_tag = codec_tag;
3090 
3091  if (!ost->frame_rate.num)
3092  ost->frame_rate = ist->framerate;
3093  ost->st->avg_frame_rate = ost->frame_rate;
3094 
3096  if (ret < 0)
3097  return ret;
3098 
3099  // copy timebase while removing common factors
3100  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3102 
3103  // copy estimated duration as a hint to the muxer
3104  if (ost->st->duration <= 0 && ist->st->duration > 0)
3105  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3106 
3107  // copy disposition
3108  ost->st->disposition = ist->st->disposition;
3109 
3110  if (ist->st->nb_side_data) {
3111  for (i = 0; i < ist->st->nb_side_data; i++) {
3112  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3113  uint8_t *dst_data;
3114 
3115  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3116  if (!dst_data)
3117  return AVERROR(ENOMEM);
3118  memcpy(dst_data, sd_src->data, sd_src->size);
3119  }
3120  }
3121 
3122  if (ost->rotate_overridden) {
3124  sizeof(int32_t) * 9);
3125  if (sd)
3126  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3127  }
3128 
3129  switch (par_dst->codec_type) {
3130  case AVMEDIA_TYPE_AUDIO:
3131  if (audio_volume != 256) {
3132  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3133  exit_program(1);
3134  }
3135  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3136  par_dst->block_align= 0;
3137  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3138  par_dst->block_align= 0;
3139  break;
3140  case AVMEDIA_TYPE_VIDEO:
3141  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3142  sar =
3143  av_mul_q(ost->frame_aspect_ratio,
3144  (AVRational){ par_dst->height, par_dst->width });
3145  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3146  "with stream copy may produce invalid files\n");
3147  }
3148  else if (ist->st->sample_aspect_ratio.num)
3149  sar = ist->st->sample_aspect_ratio;
3150  else
3151  sar = par_src->sample_aspect_ratio;
3152  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3153  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3154  ost->st->r_frame_rate = ist->st->r_frame_rate;
3155  break;
3156  }
3157 
3158  ost->mux_timebase = ist->st->time_base;
3159 
3160  return 0;
3161 }
3162 
/* Write an "encoder" metadata tag ("Lavc<version> <codec-name>") on the
 * output stream, unless the user already set one.
 * NOTE(review): the signature line was lost in this extraction — upstream it
 * is set_encoder_id(OutputFile *of, OutputStream *ost); confirm vs. repo. */
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* A user-supplied "encoder" tag wins; do not overwrite it. */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* Evaluate -fflags on the output file so bitexact mode can be honored. */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* Same for the per-stream codec "flags" option string. */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    /* "<ident> <name>" plus terminator; sizeof(LIBAVCODEC_IDENT) counts NUL. */
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    /* In bitexact mode emit only "Lavc " so outputs stay reproducible. */
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    /* NOTE(review): the final argument line of this call was lost in the
     * extraction (upstream passes AV_DICT_DONT_STRDUP_VAL) — confirm. */
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
}
3203 
/* Parse the -force_key_frames time list (comma separated; "chapters[+off]"
 * expands to all chapter starts) into a sorted pts array on ost.
 * NOTE(review): the first signature line was lost in this extraction —
 * upstream: parse_forced_key_frames(char *kf, OutputStream *ost, ...). */
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* Count entries: one more than the number of commas. */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        /* Split in place; kf is mutated. */
        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* Grow the array by one slot per chapter (minus the slot this
             * "chapters" token itself occupied); guard against int overflow. */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                /* NOTE(review): the av_log(NULL, AV_LOG_FATAL, opener line was
                 * lost in this extraction. */
                    "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* Optional "+offset" suffix after "chapters". */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3266 
3267 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3268 {
3269  InputStream *ist = get_input_stream(ost);
3270  AVCodecContext *enc_ctx = ost->enc_ctx;
3271  AVFormatContext *oc;
3272 
3273  if (ost->enc_timebase.num > 0) {
3274  enc_ctx->time_base = ost->enc_timebase;
3275  return;
3276  }
3277 
3278  if (ost->enc_timebase.num < 0) {
3279  if (ist) {
3280  enc_ctx->time_base = ist->st->time_base;
3281  return;
3282  }
3283 
3284  oc = output_files[ost->file_index]->ctx;
3285  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3286  }
3287 
3288  enc_ctx->time_base = default_time_base;
3289 }
3290 
/* Configure the encoder context of an output stream (frame rate, dimensions,
 * pixel/sample format, time base, forced keyframes) before avcodec_open2().
 * NOTE(review): this extraction lost several lines, including the signature
 * (upstream: static int init_output_stream_encode(OutputStream *ost)) and a
 * number of statements; dangling fragments below are preserved as-is rather
 * than reconstructed — confirm every flagged spot against the repository. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* NOTE(review): the dec_ctx declaration line was lost in extraction. */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No backing input stream: if this is the only stream of its media
         * type in the file, mark it default (truncated below). */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame-rate precedence: filter sink (line lost), -r / input
         * framerate option, input r_frame_rate, then a 25 fps fallback. */
        if (!ost->frame_rate.num)
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log(NULL, AV_LOG_WARNING, opener line was
             * lost in this extraction. */
                "No information "
                "about the input framerate is available. Falling "
                "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                "if you want a different framerate.\n",
                ost->file_index, ost->index);
        }
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        /* Snap to the nearest frame rate the encoder supports (unless -force_fps). */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): sample_fmt/sample_rate/channel assignments from the
         * buffersink were lost in this extraction. */
        /* Never claim more raw bits per sample than the sample format holds. */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): the init_encoder_time_base(...) call line was lost. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        /* NOTE(review): part of this condition was lost in extraction. */
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* Rescale the user's forced keyframe times into encoder time base. */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 enc_ctx->time_base);

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* NOTE(review): the body of this branch was lost in extraction. */
        }

        /* -top: force bottom-first (0) or top-first (1) field order. */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the av_expr_parse(...) call lines were lost. */
                if (ret < 0) {
                    /* NOTE(review): av_log opener line lost in extraction. */
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): the parse_forced_key_frames(...) call was lost. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* Inherit the subtitle canvas size from the source stream. */
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3448 
/* Fully initialize one output stream: for encoded streams configure and open
 * the encoder (plus hw setup, side data, time base); for streamcopy delegate
 * to the copy path; then apply -disposition, set up bitstream filters and
 * possibly write the output file header.  Returns 0 or a negative AVERROR;
 * on error a human-readable message is written into error[error_len].
 * NOTE(review): several lines were lost in this extraction; dangling
 * fragments are preserved as-is and flagged — confirm against the repo. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the allocation line was lost in extraction. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): the subtitle_header_size assignment was lost. */
        }
        /* Default to automatic threading unless the user chose otherwise. */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Audio encoders without built-in defaults get 128 kb/s unless -b/-ab given. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* Hardware frames from the filter graph vs. explicit device setup. */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* NOTE(review): condition tail and hw_frames_ctx ref lines lost. */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                    ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }
        /* Subtitle transcoding only works text->text or bitmap->bitmap. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
            /* NOTE(review): avcodec_descriptor_get(...) call lines lost. */
            AVCodecDescriptor const *output_descriptor =
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                    ost->file_index, ost->index);
            return ret;
        }
        /* Fixed-frame-size audio encoders need the sink to emit whole frames. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* NOTE(review): av_buffersink_set_frame_size opener line lost. */
                                         ost->enc_ctx->frame_size);
        /* NOTE(review): the assert_avoptions(...) line was lost. */
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): avcodec_parameters_from_context(...) line lost. */
        if (ret < 0) {
            /* NOTE(review): av_log(NULL, AV_LOG_FATAL, opener line lost. */
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* Copy coded side data produced by the encoder onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* Autorotation is handled by filters, so neutralize the matrix. */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        /* NOTE(review): the init_output_stream_streamcopy(ost) call was lost. */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3643 
/* Warn (once per stream index) when a packet arrives for a stream that only
 * appeared after the initial stream discovery of its input file. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* Already warned about this (or a higher) stream index. */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): the media-type-string argument line was lost in
            * this extraction (upstream: av_get_media_type_string(...)). */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3658 
/* One-time setup before the main transcoding loop: resolve filtergraph
 * output sources, arm -re emulation, init decoders and (non-filtered)
 * encoders, discard unused programs, write headers for stream-less files,
 * and finally print the stream mapping.  Returns 0 or a negative AVERROR. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* Back-fill source_index for outputs fed by single-input filtergraphs. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* On failure, close all encoders before reporting. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep the program if any of its streams is actually used. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* Show "native" when the decoder matches the codec name. */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a line was lost here in this extraction (upstream
     * atomically marks transcode init as done) — confirm against repo. */

    return 0;
}
3830 
3831 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3832 static int need_output(void)
3833 {
3834  int i;
3835 
3836  for (i = 0; i < nb_output_streams; i++) {
3837  OutputStream *ost = output_streams[i];
3838  OutputFile *of = output_files[ost->file_index];
3839  AVFormatContext *os = output_files[ost->file_index]->ctx;
3840 
3841  if (ost->finished ||
3842  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3843  continue;
3844  if (ost->frame_number >= ost->max_frames) {
3845  int j;
3846  for (j = 0; j < of->ctx->nb_streams; j++)
3847  close_output_stream(output_streams[of->ost_index + j]);
3848  continue;
3849  }
3850 
3851  return 1;
3852  }
3853 
3854  return 0;
3855 }
3856 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
/* NOTE(review): the signature line was lost in this extraction — upstream:
 * static OutputStream *choose_output(void). */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* An unset dts maps to INT64_MIN so fresh streams sort first. */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        /* Streams not yet initialized (and not out of input) go first. */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* Otherwise pick the unfinished stream with the smallest dts;
         * if that stream is temporarily unavailable, return NULL. */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3886 
/* Enable (on != 0) or disable terminal echo on stdin; no-op where termios
 * is unavailable or stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    /* tcgetattr fails for non-ttys; silently ignore that case. */
    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3898 
/* Poll for interactive keyboard commands (at most every 100 ms): quit,
 * verbosity, QP histogram, packet dumping, filter commands, debug modes and
 * help.  Returns AVERROR_EXIT on 'q' or a received signal, else 0.
 * NOTE(review): two lines were lost in this extraction; flagged below. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: pkt dump -> pkt+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): a line was lost here in this extraction. */
    }
    /* 'c'/'C': send (or queue, for C with a time) a command to filters. */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, opener line was
             * lost in this extraction. */
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles the debug bitmask; 'd' prompts for an explicit value. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                           |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                )) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
4015 
4016 #if HAVE_THREADS
/* Per-input-file reader thread: demuxes packets and pushes them onto the
 * file's message queue until EOF/error, then signals the error to the
 * receiving side and exits. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        /* Transient demuxer EAGAIN: back off and retry. */
        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        /* Queue full in non-blocking mode: warn once, then fall back to a
         * blocking send for the rest of the run. */
        if (flags && ret == AVERROR(EAGAIN)) {
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* NOTE(review): the av_log(f->ctx, AV_LOG_WARNING, opener line
             * was lost in this extraction. */
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4057 
/* Stop the reader thread of input file i: drain queued packets, join the
 * thread, then free the message queue. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* NOTE(review): a line was lost here in this extraction (upstream sets
     * the send-side error to AVERROR_EOF before draining) — confirm. */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4073 
4074 static void free_input_threads(void)
4075 {
4076  int i;
4077 
4078  for (i = 0; i < nb_input_files; i++)
4079  free_input_thread(i);
4080 }
4081 
4082 static int init_input_thread(int i)
4083 {
4084  int ret;
4085  InputFile *f = input_files[i];
4086 
4087  if (nb_input_files == 1)
4088  return 0;
4089 
4090  if (f->ctx->pb ? !f->ctx->pb->seekable :
4091  strcmp(f->ctx->iformat->name, "lavfi"))
4092  f->non_blocking = 1;
4093  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4094  f->thread_queue_size, sizeof(AVPacket));
4095  if (ret < 0)
4096  return ret;
4097 
4098  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4099  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4100  av_thread_message_queue_free(&f->in_thread_queue);
4101  return AVERROR(ret);
4102  }
4103 
4104  return 0;
4105 }
4106 
4107 static int init_input_threads(void)
4108 {
4109  int i, ret;
4110 
4111  for (i = 0; i < nb_input_files; i++) {
4112  ret = init_input_thread(i);
4113  if (ret < 0)
4114  return ret;
4115  }
4116  return 0;
4117 }
4118 
/* Pull the next demuxed packet from the input file's reader-thread queue. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    /* NOTE(review): the continuation line of this call was lost in the
     * extraction (upstream: AV_THREAD_MESSAGE_NONBLOCK : 0) — confirm. */
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
}
4125 #endif
4126 
/* Read the next packet from an input file, honoring -re rate emulation.
 * NOTE(review): the signature line was lost in this extraction — upstream:
 * static int get_input_packet(InputFile *f, AVPacket *pkt). */
{
    /* -re: refuse to hand out a packet before its dts-derived wallclock time. */
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    /* With several inputs each file has its own reader thread. */
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4146 
4147 static int got_eagain(void)
4148 {
4149  int i;
4150  for (i = 0; i < nb_output_streams; i++)
4151  if (output_streams[i]->unavailable)
4152  return 1;
4153  return 0;
4154 }
4155 
4156 static void reset_eagain(void)
4157 {
4158  int i;
4159  for (i = 0; i < nb_input_files; i++)
4160  input_files[i]->eagain = 0;
4161  for (i = 0; i < nb_output_streams; i++)
4162  output_streams[i]->unavailable = 0;
4163 }
4164 
4165 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4166 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4167  AVRational time_base)
4168 {
4169  int ret;
4170 
4171  if (!*duration) {
4172  *duration = tmp;
4173  return tmp_time_base;
4174  }
4175 
4176  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4177  if (ret < 0) {
4178  *duration = tmp;
4179  return tmp_time_base;
4180  }
4181 
4182  return time_base;
4183 }
4184 
/* Rewind an input file for -stream_loop and compute the per-file duration to
 * offset timestamps by on the next pass.
 * NOTE(review): the signature line was lost in this extraction — upstream:
 * static int seek_to_start(InputFile *ifile, AVFormatContext *is). */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            /* Last-frame length from the decoded sample count. */
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* No audio: estimate one frame's duration from the frame rate. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* A positive loop count is consumed one pass at a time; negative loops forever. */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4242 
4243 /*
4244  * Return
4245  * - 0 -- one packet was read and processed
4246  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4247  * this function should be called again
4248  * - AVERROR_EOF -- this function should not be called again
4249  */
4250 static int process_input(int file_index)
4251 {
4252  InputFile *ifile = input_files[file_index];
4253  AVFormatContext *is;
4254  InputStream *ist;
4255  AVPacket pkt;
4256  int ret, thread_ret, i, j;
4257  int64_t duration;
4258  int64_t pkt_dts;
4259 
4260  is = ifile->ctx;
4261  ret = get_input_packet(ifile, &pkt);
4262 
4263  if (ret == AVERROR(EAGAIN)) {
4264  ifile->eagain = 1;
4265  return ret;
4266  }
4267  if (ret < 0 && ifile->loop) {
4268  AVCodecContext *avctx;
4269  for (i = 0; i < ifile->nb_streams; i++) {
4270  ist = input_streams[ifile->ist_index + i];
4271  avctx = ist->dec_ctx;
4272  if (ist->decoding_needed) {
4273  ret = process_input_packet(ist, NULL, 1);
4274  if (ret>0)
4275  return 0;
4276  avcodec_flush_buffers(avctx);
4277  }
4278  }
4279 #if HAVE_THREADS
4280  free_input_thread(file_index);
4281 #endif
4282  ret = seek_to_start(ifile, is);
4283 #if HAVE_THREADS
4284  thread_ret = init_input_thread(file_index);
4285  if (thread_ret < 0)
4286  return thread_ret;
4287 #endif
4288  if (ret < 0)
4289  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4290  else
4291  ret = get_input_packet(ifile, &pkt);
4292  if (ret == AVERROR(EAGAIN)) {
4293  ifile->eagain = 1;
4294  return ret;
4295  }
4296  }
4297  if (ret < 0) {
4298  if (ret != AVERROR_EOF) {
4299  print_error(is->url, ret);
4300  if (exit_on_error)
4301  exit_program(1);
4302  }
4303 
4304  for (i = 0; i < ifile->nb_streams; i++) {
4305  ist = input_streams[ifile->ist_index + i];
4306  if (ist->decoding_needed) {
4307  ret = process_input_packet(ist, NULL, 0);
4308  if (ret>0)
4309  return 0;
4310  }
4311 
4312  /* mark all outputs that don't go through lavfi as finished */
4313  for (j = 0; j < nb_output_streams; j++) {
4314  OutputStream *ost = output_streams[j];
4315 
4316  if (ost->source_index == ifile->ist_index + i &&
4317  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4318  finish_output_stream(ost);
4319  }
4320  }
4321 
4322  ifile->eof_reached = 1;
4323  return AVERROR(EAGAIN);
4324  }
4325 
4326  reset_eagain();
4327 
4328  if (do_pkt_dump) {
4330  is->streams[pkt.stream_index]);
4331  }
4332  /* the following test is needed in case new streams appear
4333  dynamically in stream : we ignore them */
4334  if (pkt.stream_index >= ifile->nb_streams) {
4335  report_new_stream(file_index, &pkt);
4336  goto discard_packet;
4337  }
4338 
4339  ist = input_streams[ifile->ist_index + pkt.stream_index];
4340 
4341  ist->data_size += pkt.size;
4342  ist->nb_packets++;
4343 
4344  if (ist->discard)
4345  goto discard_packet;
4346 
4347  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4348  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4349  exit_program(1);
4350  }
4351 
4352  if (debug_ts) {
4353  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4354  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4358  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4359  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4360  av_ts2str(input_files[ist->file_index]->ts_offset),
4361  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4362  }
4363 
4364  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4365  int64_t stime, stime2;
4366  // Correcting starttime based on the enabled streams
4367  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4368  // so we instead do it here as part of discontinuity handling
4369  if ( ist->next_dts == AV_NOPTS_VALUE
4370  && ifile->ts_offset == -is->start_time
4371  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4372  int64_t new_start_time = INT64_MAX;
4373  for (i=0; i<is->nb_streams; i++) {
4374  AVStream *st = is->streams[i];
4375  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4376  continue;
4377  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4378  }
4379  if (new_start_time > is->start_time) {
4380  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4381  ifile->ts_offset = -new_start_time;
4382  }
4383  }
4384 
4385  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4386  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4387  ist->wrap_correction_done = 1;
4388 
4389  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4390  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4391  ist->wrap_correction_done = 0;
4392  }
4393  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4394  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4395  ist->wrap_correction_done = 0;
4396  }
4397  }
4398 
4399  /* add the stream-global side data to the first packet */
4400  if (ist->nb_packets == 1) {
4401  for (i = 0; i < ist->st->nb_side_data; i++) {
4402  AVPacketSideData *src_sd = &ist->st->side_data[i];
4403  uint8_t *dst_data;
4404 
4405  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4406  continue;
4407 
4408  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4409  continue;
4410 
4411  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4412  if (!dst_data)
4413  exit_program(1);
4414 
4415  memcpy(dst_data, src_sd->data, src_sd->size);
4416  }
4417  }
4418 
4419  if (pkt.dts != AV_NOPTS_VALUE)
4420  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4421  if (pkt.pts != AV_NOPTS_VALUE)
4422  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4423 
4424  if (pkt.pts != AV_NOPTS_VALUE)
4425  pkt.pts *= ist->ts_scale;
4426  if (pkt.dts != AV_NOPTS_VALUE)
4427  pkt.dts *= ist->ts_scale;
4428 
4430  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4432  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4433  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4434  int64_t delta = pkt_dts - ifile->last_ts;
4435  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4436  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4437  ifile->ts_offset -= delta;
4439  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4440  delta, ifile->ts_offset);
4441  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4442  if (pkt.pts != AV_NOPTS_VALUE)
4443  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4444  }
4445  }
4446 
4447  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4448  if (pkt.pts != AV_NOPTS_VALUE) {
4449  pkt.pts += duration;
4450  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4451  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4452  }
4453 
4454  if (pkt.dts != AV_NOPTS_VALUE)
4455  pkt.dts += duration;
4456 
4458  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4460  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4461  !copy_ts) {
4462  int64_t delta = pkt_dts - ist->next_dts;
4463  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4464  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4465  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4466  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4467  ifile->ts_offset -= delta;
4469  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4470  delta, ifile->ts_offset);
4471  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4472  if (pkt.pts != AV_NOPTS_VALUE)
4473  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4474  }
4475  } else {
4476  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4477  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4478  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4479  pkt.dts = AV_NOPTS_VALUE;
4480  }
4481  if (pkt.pts != AV_NOPTS_VALUE){
4482  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4483  delta = pkt_pts - ist->next_dts;
4484  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4485  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4486  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4487  pkt.pts = AV_NOPTS_VALUE;
4488  }
4489  }
4490  }
4491  }
4492 
4493  if (pkt.dts != AV_NOPTS_VALUE)
4494  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4495 
4496  if (debug_ts) {
4497  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4499  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4500  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4501  av_ts2str(input_files[ist->file_index]->ts_offset),
4502  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4503  }
4504 
4505  sub2video_heartbeat(ist, pkt.pts);
4506 
4507  process_input_packet(ist, &pkt, 0);
4508 
4509 discard_packet:
4510  av_packet_unref(&pkt);
4511 
4512  return 0;
4513 }
4514 
4515 /**
4516  * Perform a step of transcoding for the specified filter graph.
4517  *
4518  * @param[in] graph filter graph to consider
4519  * @param[out] best_ist input stream where a frame would allow to continue
4520  * @return 0 for success, <0 for error
4521  */
4522 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4523 {
4524  int i, ret;
4525  int nb_requests, nb_requests_max = 0;
4526  InputFilter *ifilter;
4527  InputStream *ist;
4528 
4529  *best_ist = NULL;
4530  ret = avfilter_graph_request_oldest(graph->graph);
4531  if (ret >= 0)
4532  return reap_filters(0);
4533 
4534  if (ret == AVERROR_EOF) {
4535  ret = reap_filters(1);
4536  for (i = 0; i < graph->nb_outputs; i++)
4537  close_output_stream(graph->outputs[i]->ost);
4538  return ret;
4539  }
4540  if (ret != AVERROR(EAGAIN))
4541  return ret;
4542 
4543  for (i = 0; i < graph->nb_inputs; i++) {
4544  ifilter = graph->inputs[i];
4545  ist = ifilter->ist;
4546  if (input_files[ist->file_index]->eagain ||
4547  input_files[ist->file_index]->eof_reached)
4548  continue;
4549  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4550  if (nb_requests > nb_requests_max) {
4551  nb_requests_max = nb_requests;
4552  *best_ist = ist;
4553  }
4554  }
4555 
4556  if (!*best_ist)
4557  for (i = 0; i < graph->nb_outputs; i++)
4558  graph->outputs[i]->ost->unavailable = 1;
4559 
4560  return 0;
4561 }
4562 
4563 /**
4564  * Run a single step of transcoding.
4565  *
4566  * @return 0 for success, <0 for error
4567  */
4568 static int transcode_step(void)
4569 {
4570  OutputStream *ost;
4571  InputStream *ist = NULL;
4572  int ret;
4573 
4574  ost = choose_output();
4575  if (!ost) {
4576  if (got_eagain()) {
4577  reset_eagain();
4578  av_usleep(10000);
4579  return 0;
4580  }
4581  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4582  return AVERROR_EOF;
4583  }
4584 
4585  if (ost->filter && !ost->filter->graph->graph) {
4587  ret = configure_filtergraph(ost->filter->graph);
4588  if (ret < 0) {
4589  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4590  return ret;
4591  }
4592  }
4593  }
4594 
4595  if (ost->filter && ost->filter->graph->graph) {
4596  if (!ost->initialized) {
4597  char error[1024] = {0};
4598  ret = init_output_stream(ost, error, sizeof(error));
4599  if (ret < 0) {
4600  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4601  ost->file_index, ost->index, error);
4602  exit_program(1);
4603  }
4604  }
4605  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4606  return ret;
4607  if (!ist)
4608  return 0;
4609  } else if (ost->filter) {
4610  int i;
4611  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4612  InputFilter *ifilter = ost->filter->graph->inputs[i];
4613  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4614  ist = ifilter->ist;
4615  break;
4616  }
4617  }
4618  if (!ist) {
4619  ost->inputs_done = 1;
4620  return 0;
4621  }
4622  } else {
4623  av_assert0(ost->source_index >= 0);
4624  ist = input_streams[ost->source_index];