FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity reported by the cmdutils banner/help machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Log file for per-frame video statistics (-vstats); opened lazily, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* Variable names available in -force_key_frames expressions, in the order
 * the expression evaluator expects; NULL-terminated. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
/* Forward declarations for helpers defined later in the file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Global transcode-loop statistics and flags.
 * NOTE(review): the extraction dropped several declarations here (original
 * lines 142-159) — do not assume this list is complete. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* NOTE(review): the function signature was lost in extraction — from the body
 * this is sub2video_get_blank_frame(InputStream *ist): (re)allocate the
 * sub2video canvas frame and zero its first plane. Line 184 (presumably the
 * frame->format assignment) is also missing — confirm against upstream. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
/* Fall back to the configured sub2video size when the decoder has none. */
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* Clear plane 0 so the canvas starts fully transparent/blank. */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas frame (with the given pts) into every
 * filtergraph input fed by this stream.
 * NOTE(review): extraction dropped the flag arguments of
 * av_buffersrc_add_frame_flags() (original lines 232-233) — confirm against
 * upstream before editing. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Re-render the sub2video canvas from an AVSubtitle (or blank it when sub is
 * NULL) and push it into the filtergraphs. Timestamps are rescaled from
 * AV_TIME_BASE_Q into the stream time base. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  /* If we are initializing the system, utilize current heartbeat
258  PTS as the start time, and show until the following subpicture
259  is received. Otherwise, utilize the previous subpicture's end time
260  as the fall-back value. */
261  pts = ist->sub2video.initialize ?
262  heartbeat_pts : ist->sub2video.end_pts;
263  end_pts = INT64_MAX;
264  num_rects = 0;
265  }
/* NOTE(review): the av_log() call line (original 267) was lost in extraction;
 * only its message string remains below. */
266  if (sub2video_get_blank_frame(ist) < 0) {
268  "Impossible to get a blank canvas.\n");
269  return;
270  }
271  dst = frame->data [0];
272  dst_linesize = frame->linesize[0];
273  for (i = 0; i < num_rects; i++)
274  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275  sub2video_push_ref(ist, pts);
276  ist->sub2video.end_pts = end_pts;
277  ist->sub2video.initialize = 0;
278 }
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Restore the saved terminal state; async-signal-safe (tcsetattr only). */
328 static void term_exit_sigsafe(void)
329 {
330 #if HAVE_TERMIOS_H
331  if(restore_tty)
332  tcsetattr (0, TCSANOW, &oldtty);
333 #endif
334 }
335 
/* Public terminal teardown.
 * NOTE(review): the call to term_exit_sigsafe() (original line 339) was lost
 * in extraction. */
336 void term_exit(void)
337 {
338  av_log(NULL, AV_LOG_QUIET, "%s", "");
340 }
341 
/* Signal/exit bookkeeping shared with the main loop and the Windows
 * console handler. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
347 
/* Termination-signal handler: records the signal and hard-exits after
 * repeated signals.
 * NOTE(review): extraction dropped the parameter list line (original 349,
 * presumably "sigterm_handler(int sig)") and lines 353-354 (presumably the
 * received_nb_signals increment and term_exit_sigsafe() call). */
348 static void
350 {
351  int ret;
352  received_sigterm = sig;
355  if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, unlike stdio. */
356  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357  strlen("Received > 3 system signals, hard exiting\n"));
358  if (ret < 0) { /* Do nothing */ };
359  exit(123);
360  }
361 }
362 
363 #if HAVE_SETCONSOLECTRLHANDLER
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
365 {
366  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
367 
368  switch (fdwCtrlType)
369  {
370  case CTRL_C_EVENT:
371  case CTRL_BREAK_EVENT:
372  sigterm_handler(SIGINT);
373  return TRUE;
374 
375  case CTRL_CLOSE_EVENT:
376  case CTRL_LOGOFF_EVENT:
377  case CTRL_SHUTDOWN_EVENT:
378  sigterm_handler(SIGTERM);
379  /* Basically, with these 3 events, when we return from this method the
380  process is hard terminated, so stall as long as we need to
381  to try and let the main thread(s) clean up and gracefully terminate
382  (we have at most 5 seconds, but should be done far before that). */
383  while (!ffmpeg_exited) {
384  Sleep(0);
385  }
386  return TRUE;
387 
388  default:
389  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
390  return FALSE;
391  }
392 }
393 #endif
394 
/* Put the controlling terminal into raw-ish mode so single keypresses can be
 * read without echo, and install the termination-signal handlers.
 * NOTE(review): extraction dropped a line between 397 and 399 (original 398,
 * presumably "if (!run_as_daemon) {" — the extra closing brace on line 416
 * supports this). Confirm against upstream before editing. */
395 void term_init(void)
396 {
397 #if HAVE_TERMIOS_H
399  struct termios tty;
400  if (tcgetattr (0, &tty) == 0) {
/* Save the original settings so term_exit_sigsafe() can restore them. */
401  oldtty = tty;
402  restore_tty = 1;
403 
404  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405  |INLCR|IGNCR|ICRNL|IXON);
406  tty.c_oflag |= OPOST;
407  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408  tty.c_cflag &= ~(CSIZE|PARENB);
409  tty.c_cflag |= CS8;
/* Non-canonical read: return after 1 byte, no timeout. */
410  tty.c_cc[VMIN] = 1;
411  tty.c_cc[VTIME] = 0;
412 
413  tcsetattr (0, TCSANOW, &tty);
414  }
415  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
416  }
417 #endif
418 
419  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
421 #ifdef SIGXCPU
422  signal(SIGXCPU, sigterm_handler);
423 #endif
424 #ifdef SIGPIPE
425  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
426 #endif
427 #if HAVE_SETCONSOLECTRLHANDLER
428  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
429 #endif
430 }
431 
432 /* read a key without blocking */
/* Returns the key byte (0-255), 0 on EOF of stdin (termios path), or -1 when
 * no input is available. Three platform paths: POSIX select()+read(),
 * Windows pipe via PeekNamedPipe(), Windows console via kbhit()/getch(). */
433 static int read_key(void)
434 {
435  unsigned char ch;
436 #if HAVE_TERMIOS_H
437  int n = 1;
438  struct timeval tv;
439  fd_set rfds;
440 
441  FD_ZERO(&rfds);
442  FD_SET(0, &rfds);
/* Zero timeout: poll stdin without blocking. */
443  tv.tv_sec = 0;
444  tv.tv_usec = 0;
445  n = select(1, &rfds, NULL, NULL, &tv);
446  if (n > 0) {
447  n = read(0, &ch, 1);
448  if (n == 1)
449  return ch;
450 
/* read() returned 0 (EOF) or -1 (error); propagate it. */
451  return n;
452  }
453 #elif HAVE_KBHIT
454 # if HAVE_PEEKNAMEDPIPE
/* Cached across calls: detect once whether stdin is a pipe or a console. */
455  static int is_pipe;
456  static HANDLE input_handle;
457  DWORD dw, nchars;
458  if(!input_handle){
459  input_handle = GetStdHandle(STD_INPUT_HANDLE);
460  is_pipe = !GetConsoleMode(input_handle, &dw);
461  }
462 
463  if (is_pipe) {
464  /* When running under a GUI, you will end here. */
465  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466  // input pipe may have been closed by the program that ran ffmpeg
467  return -1;
468  }
469  //Read it
470  if(nchars != 0) {
471  read(0, &ch, 1);
472  return ch;
473  }else{
474  return -1;
475  }
476  }
477 # endif
478  if(kbhit())
479  return(getch());
480 #endif
481  return -1;
482 }
483 
/* AVIOInterruptCallback used while opening/reading inputs.
 * NOTE(review): the return statement (original line 486) was lost in
 * extraction — confirm the body against upstream. */
484 static int decode_interrupt_cb(void *ctx)
485 {
487 }
488 
490 
/* Global teardown, registered via register_exit(): frees filtergraphs,
 * output files/streams, input files/streams, closes the vstats file and
 * restores the terminal. `ret` is the pending process exit code.
 * NOTE(review): several lines were lost in extraction (originals 502,
 * 513-514, 518, 520, 539, 544, 550, 565, 570, 574, 580-581, 589, 604-608,
 * 613, 620, 624, 633) — some frees/log calls are missing below. Confirm
 * against upstream before editing. */
491 static void ffmpeg_cleanup(int ret)
492 {
493  int i, j;
494 
495  if (do_benchmark) {
496  int maxrss = getmaxrss() / 1024;
497  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
498  }
499 
/* Drain and free every filtergraph, including queued frames and the
 * sub2video subtitle queues on the inputs. */
500  for (i = 0; i < nb_filtergraphs; i++) {
501  FilterGraph *fg = filtergraphs[i];
503  for (j = 0; j < fg->nb_inputs; j++) {
504  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
505  AVFrame *frame;
506  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
507  sizeof(frame), NULL);
508  av_frame_free(&frame);
509  }
510  av_fifo_freep(&fg->inputs[j]->frame_queue);
511  if (fg->inputs[j]->ist->sub2video.sub_queue) {
512  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
513  AVSubtitle sub;
515  &sub, sizeof(sub), NULL);
516  avsubtitle_free(&sub);
517  }
519  }
521  av_freep(&fg->inputs[j]->name);
522  av_freep(&fg->inputs[j]);
523  }
524  av_freep(&fg->inputs);
525  for (j = 0; j < fg->nb_outputs; j++) {
526  av_freep(&fg->outputs[j]->name);
527  av_freep(&fg->outputs[j]->formats);
528  av_freep(&fg->outputs[j]->channel_layouts);
529  av_freep(&fg->outputs[j]->sample_rates);
530  av_freep(&fg->outputs[j]);
531  }
532  av_freep(&fg->outputs);
533  av_freep(&fg->graph_desc);
534 
535  av_freep(&filtergraphs[i]);
536  }
537  av_freep(&filtergraphs);
538 
540 
541  /* close files */
542  for (i = 0; i < nb_output_files; i++) {
543  OutputFile *of = output_files[i];
545  if (!of)
546  continue;
547  s = of->ctx;
548  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
549  avio_closep(&s->pb);
551  av_dict_free(&of->opts);
552 
553  av_freep(&output_files[i]);
554  }
555  for (i = 0; i < nb_output_streams; i++) {
556  OutputStream *ost = output_streams[i];
557 
558  if (!ost)
559  continue;
560 
561  for (j = 0; j < ost->nb_bitstream_filters; j++)
562  av_bsf_free(&ost->bsf_ctx[j]);
563  av_freep(&ost->bsf_ctx);
564 
566  av_frame_free(&ost->last_frame);
567  av_dict_free(&ost->encoder_opts);
568 
569  av_freep(&ost->forced_keyframes);
571  av_freep(&ost->avfilter);
572  av_freep(&ost->logfile_prefix);
573 
575  ost->audio_channels_mapped = 0;
576 
577  av_dict_free(&ost->sws_dict);
578  av_dict_free(&ost->swr_opts);
579 
582 
/* Unref any packets still waiting for the muxer to be initialized. */
583  if (ost->muxing_queue) {
584  while (av_fifo_size(ost->muxing_queue)) {
585  AVPacket pkt;
586  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
587  av_packet_unref(&pkt);
588  }
590  }
591 
592  av_freep(&output_streams[i]);
593  }
594 #if HAVE_THREADS
595  free_input_threads();
596 #endif
597  for (i = 0; i < nb_input_files; i++) {
598  avformat_close_input(&input_files[i]->ctx);
599  av_freep(&input_files[i]);
600  }
601  for (i = 0; i < nb_input_streams; i++) {
602  InputStream *ist = input_streams[i];
603 
606  av_dict_free(&ist->decoder_opts);
609  av_freep(&ist->filters);
610  av_freep(&ist->hwaccel_device);
611  av_freep(&ist->dts_buffer);
612 
614 
615  av_freep(&input_streams[i]);
616  }
617 
618  if (vstats_file) {
619  if (fclose(vstats_file))
621  "Error closing vstats file, loss of information possible: %s\n",
622  av_err2str(AVERROR(errno)));
623  }
625 
626  av_freep(&input_streams);
627  av_freep(&input_files);
628  av_freep(&output_streams);
629  av_freep(&output_files);
630 
631  uninit_opts();
632 
634 
635  if (received_sigterm) {
636  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
637  (int) received_sigterm);
638  } else if (ret && atomic_load(&transcode_init_done)) {
639  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
640  }
641  term_exit();
/* Lets the Windows CtrlHandler stop spinning and allow hard termination. */
642  ffmpeg_exited = 1;
643 }
644 
/* NOTE(review): signature lost in extraction (original 645) — from the body
 * this is remove_avoptions(AVDictionary **a, AVDictionary *b): drop from *a
 * every key present in b. The loop-body line (original 650, presumably an
 * av_dict_set(a, t->key, NULL, ...)) is also missing. */
646 {
647  AVDictionaryEntry *t = NULL;
648 
649  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
651  }
652 }
653 
/* NOTE(review): signature lost (original 654) — this is
 * assert_avoptions(AVDictionary *m): abort if any option was not consumed.
 * The declaration of t (original 656) is also missing. */
655 {
657  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
658  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
659  exit_program(1);
660  }
661 }
662 
/* Abort when an experimental codec was requested without the proper flag.
 * NOTE(review): upstream logs an explanatory message first; nothing of it
 * survived extraction here. */
663 static void abort_codec_experimental(AVCodec *c, int encoder)
664 {
665  exit_program(1);
666 }
667 
/* With -benchmark_all: log user/sys/real time consumed since the previous
 * call, tagged with the printf-style label in fmt; then reset the baseline.
 * NOTE(review): extraction dropped the declaration/initialization of `t`
 * (original 671, presumably t = get_benchmark_time_stamps()) and the av_log
 * call line (original 679). */
668 static void update_benchmark(const char *fmt, ...)
669 {
670  if (do_benchmark_all) {
672  va_list va;
673  char buf[1024];
674 
675  if (fmt) {
676  va_start(va, fmt);
677  vsnprintf(buf, sizeof(buf), fmt, va);
678  va_end(va);
680  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
681  t.user_usec - current_time.user_usec,
682  t.sys_usec - current_time.sys_usec,
683  t.real_usec - current_time.real_usec, buf);
684  }
685  current_time = t;
686  }
687 }
688 
/* NOTE(review): signature lost (original 689) — this is
 * close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
 * OSTFinished others): mark ost with this_stream and every other output
 * stream with others. */
690 {
691  int i;
692  for (i = 0; i < nb_output_streams; i++) {
693  OutputStream *ost2 = output_streams[i];
694  ost2->finished |= ost == ost2 ? this_stream : others;
695  }
696 }
697 
/* Rescale, sanity-check and interleave-write one packet to the muxer; if the
 * muxer header has not been written yet, queue the packet instead.
 * NOTE(review): extraction dropped several lines (originals 727, 744-745,
 * 750, 784, 817, 828) — among them an av_log call, the side-data fetch that
 * initializes `sd`, and part of the DTS-monotonicity condition. Confirm
 * against upstream before editing. */
698 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
699 {
700  AVFormatContext *s = of->ctx;
701  AVStream *st = ost->st;
702  int ret;
703 
704  /*
705  * Audio encoders may split the packets -- #frames in != #packets out.
706  * But there is no reordering, so we can limit the number of output packets
707  * by simply dropping them here.
708  * Counting encoded video frames needs to be done separately because of
709  * reordering, see do_video_out().
710  * Do not count the packet when unqueued because it has been counted when queued.
711  */
712  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
713  if (ost->frame_number >= ost->max_frames) {
714  av_packet_unref(pkt);
715  return;
716  }
717  ost->frame_number++;
718  }
719 
720  if (!of->header_written) {
721  AVPacket tmp_pkt = {0};
722  /* the muxer is not initialized yet, buffer the packet */
723  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped by -max_muxing_queue_size. */
724  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
725  ost->max_muxing_queue_size);
726  if (new_size <= av_fifo_size(ost->muxing_queue)) {
728  "Too many packets buffered for output stream %d:%d.\n",
729  ost->file_index, ost->st->index);
730  exit_program(1);
731  }
732  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
733  if (ret < 0)
734  exit_program(1);
735  }
736  ret = av_packet_make_refcounted(pkt);
737  if (ret < 0)
738  exit_program(1);
739  av_packet_move_ref(&tmp_pkt, pkt);
740  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
741  return;
742  }
743 
746  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
747 
748  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
749  int i;
/* `sd` points to AV_PKT_DATA_QUALITY_STATS side data (fetch line lost). */
751  NULL);
752  ost->quality = sd ? AV_RL32(sd) : -1;
753  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
754 
755  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
756  if (sd && i < sd[5])
757  ost->error[i] = AV_RL64(sd + 8 + 8*i);
758  else
759  ost->error[i] = -1;
760  }
761 
/* CFR output: derive the duration from the frame rate. */
762  if (ost->frame_rate.num && ost->is_cfr) {
763  if (pkt->duration > 0)
764  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
765  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
766  ost->mux_timebase);
767  }
768  }
769 
770  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
771 
772  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
773  if (pkt->dts != AV_NOPTS_VALUE &&
774  pkt->pts != AV_NOPTS_VALUE &&
775  pkt->dts > pkt->pts) {
776  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
777  pkt->dts, pkt->pts,
778  ost->file_index, ost->st->index);
/* Replace both by the median of {pts, dts, last_mux_dts+1}. */
779  pkt->pts =
780  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
781  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
782  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
783  }
785  pkt->dts != AV_NOPTS_VALUE &&
786  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
787  ost->last_mux_dts != AV_NOPTS_VALUE) {
788  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
789  if (pkt->dts < max) {
790  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
791  av_log(s, loglevel, "Non-monotonous DTS in output stream "
792  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
793  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
794  if (exit_on_error) {
795  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
796  exit_program(1);
797  }
798  av_log(s, loglevel, "changing to %"PRId64". This may result "
799  "in incorrect timestamps in the output file.\n",
800  max);
801  if (pkt->pts >= pkt->dts)
802  pkt->pts = FFMAX(pkt->pts, max);
803  pkt->dts = max;
804  }
805  }
806  }
807  ost->last_mux_dts = pkt->dts;
808 
809  ost->data_size += pkt->size;
810  ost->packets_written++;
811 
812  pkt->stream_index = ost->index;
813 
814  if (debug_ts) {
815  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
816  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
818  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
819  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
820  pkt->size
821  );
822  }
823 
824  ret = av_interleaved_write_frame(s, pkt);
825  if (ret < 0) {
826  print_error("av_interleaved_write_frame()", ret);
827  main_return_code = 1;
829  }
830  av_packet_unref(pkt);
831 }
832 
/* NOTE(review): signature lost (original 833) — this is
 * close_output_stream(OutputStream *ost): mark the encoder finished and,
 * under -shortest, clamp the file's recording time to this stream's end. */
834 {
835  OutputFile *of = output_files[ost->file_index];
836 
837  ost->finished |= ENCODER_FINISHED;
838  if (of->shortest) {
839  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
840  of->recording_time = FFMIN(of->recording_time, end);
841  }
842 }
843 
844 /*
845  * Send a single packet to the output, applying any bitstream filters
846  * associated with the output stream. This may result in any number
847  * of packets actually being written, depending on what bitstream
848  * filters are applied. The supplied packet is consumed and will be
849  * blank (as if newly-allocated) when this function returns.
850  *
851  * If eof is set, instead indicate EOF to all bitstream filters and
852  * therefore flush any delayed packets to the output. A blank packet
853  * must be supplied in this case.
854  */
/* NOTE(review): the first signature line (original 855, "static void
 * output_packet(OutputFile *of, AVPacket *pkt,") was lost in extraction. */
856  OutputStream *ost, int eof)
857 {
858  int ret = 0;
859 
860  /* apply the output bitstream filters, if any */
861  if (ost->nb_bitstream_filters) {
862  int idx;
863 
864  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
865  if (ret < 0)
866  goto finish;
867 
868  eof = 0;
869  idx = 1;
/* idx tracks the position in the bsf chain; 0 means fully drained. */
870  while (idx) {
871  /* get a packet from the previous filter up the chain */
872  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
873  if (ret == AVERROR(EAGAIN)) {
874  ret = 0;
875  idx--;
876  continue;
877  } else if (ret == AVERROR_EOF) {
878  eof = 1;
879  } else if (ret < 0)
880  goto finish;
881 
882  /* send it to the next filter down the chain or to the muxer */
883  if (idx < ost->nb_bitstream_filters) {
884  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
885  if (ret < 0)
886  goto finish;
887  idx++;
888  eof = 0;
889  } else if (eof)
890  goto finish;
891  else
892  write_packet(of, pkt, ost, 0);
893  }
894  } else if (!eof)
895  write_packet(of, pkt, ost, 0);
896 
897 finish:
898  if (ret < 0 && ret != AVERROR_EOF) {
899  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
900  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
901  if(exit_on_error)
902  exit_program(1);
903  }
904 }
905 
/* NOTE(review): signature lost (original 906) — this is
 * check_recording_time(OutputStream *ost): return 0 and close the stream
 * once -t recording time is exceeded, 1 otherwise. The av_compare_ts() call
 * line (original 911) is also missing. */
907 {
908  OutputFile *of = output_files[ost->file_index];
909 
910  if (of->recording_time != INT64_MAX &&
912  AV_TIME_BASE_Q) >= 0) {
913  close_output_stream(ost);
914  return 0;
915  }
916  return 1;
917 }
918 
/* NOTE(review): the first signature line (original 919, "static void
 * do_audio_out(OutputFile *of, OutputStream *ost,") was lost in extraction,
 * as was line 940. Encode one audio frame and mux every packet the encoder
 * produces. */
920  AVFrame *frame)
921 {
922  AVCodecContext *enc = ost->enc_ctx;
923  AVPacket pkt;
924  int ret;
925 
926  av_init_packet(&pkt);
927  pkt.data = NULL;
928  pkt.size = 0;
929 
930  if (!check_recording_time(ost))
931  return;
932 
/* Without a pts (or with negative -async), continue the running count. */
933  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
934  frame->pts = ost->sync_opts;
935  ost->sync_opts = frame->pts + frame->nb_samples;
936  ost->samples_encoded += frame->nb_samples;
937  ost->frames_encoded++;
938 
939  av_assert0(pkt.size || !pkt.data);
941  if (debug_ts) {
942  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
943  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
944  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
945  enc->time_base.num, enc->time_base.den);
946  }
947 
948  ret = avcodec_send_frame(enc, frame);
949  if (ret < 0)
950  goto error;
951 
/* Drain all packets the encoder has ready for this frame. */
952  while (1) {
953  ret = avcodec_receive_packet(enc, &pkt);
954  if (ret == AVERROR(EAGAIN))
955  break;
956  if (ret < 0)
957  goto error;
958 
959  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
960 
961  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
962 
963  if (debug_ts) {
964  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
965  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
966  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
967  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
968  }
969 
970  output_packet(of, &pkt, ost, 0);
971  }
972 
973  return;
974 error:
975  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
976  exit_program(1);
977 }
978 
979 static void do_subtitle_out(OutputFile *of,
980  OutputStream *ost,
981  AVSubtitle *sub)
982 {
983  int subtitle_out_max_size = 1024 * 1024;
984  int subtitle_out_size, nb, i;
985  AVCodecContext *enc;
986  AVPacket pkt;
987  int64_t pts;
988 
989  if (sub->pts == AV_NOPTS_VALUE) {
990  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
991  if (exit_on_error)
992  exit_program(1);
993  return;
994  }
995 
996  enc = ost->enc_ctx;
997 
998  if (!subtitle_out) {
999  subtitle_out = av_malloc(subtitle_out_max_size);
1000  if (!subtitle_out) {
1001  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1002  exit_program(1);
1003  }
1004  }
1005 
1006  /* Note: DVB subtitle need one packet to draw them and one other
1007  packet to clear them */
1008  /* XXX: signal it in the codec context ? */
1009  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1010  nb = 2;
1011  else
1012  nb = 1;
1013 
1014  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1015  pts = sub->pts;
1016  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1017  pts -= output_files[ost->file_index]->start_time;
1018  for (i = 0; i < nb; i++) {
1019  unsigned save_num_rects = sub->num_rects;
1020 
1021  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1022  if (!check_recording_time(ost))
1023  return;
1024 
1025  sub->pts = pts;
1026  // start_display_time is required to be 0
1027  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1028  sub->end_display_time -= sub->start_display_time;
1029  sub->start_display_time = 0;
1030  if (i == 1)
1031  sub->num_rects = 0;
1032 
1033  ost->frames_encoded++;
1034 
1035  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1036  subtitle_out_max_size, sub);
1037  if (i == 1)
1038  sub->num_rects = save_num_rects;
1039  if (subtitle_out_size < 0) {
1040  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1041  exit_program(1);
1042  }
1043 
1044  av_init_packet(&pkt);
1045  pkt.data = subtitle_out;
1046  pkt.size = subtitle_out_size;
1047  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1048  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1049  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1050  /* XXX: the pts correction is handled here. Maybe handling
1051  it in the codec would be better */
1052  if (i == 0)
1053  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1054  else
1055  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1056  }
1057  pkt.dts = pkt.pts;
1058  output_packet(of, &pkt, ost, 0);
1059  }
1060 }
1061 
1062 static void do_video_out(OutputFile *of,
1063  OutputStream *ost,
1064  AVFrame *next_picture,
1065  double sync_ipts)
1066 {
1067  int ret, format_video_sync;
1068  AVPacket pkt;
1069  AVCodecContext *enc = ost->enc_ctx;
1070  AVCodecParameters *mux_par = ost->st->codecpar;
1071  AVRational frame_rate;
1072  int nb_frames, nb0_frames, i;
1073  double delta, delta0;
1074  double duration = 0;
1075  int frame_size = 0;
1076  InputStream *ist = NULL;
1078 
1079  if (ost->source_index >= 0)
1080  ist = input_streams[ost->source_index];
1081 
1082  frame_rate = av_buffersink_get_frame_rate(filter);
1083  if (frame_rate.num > 0 && frame_rate.den > 0)
1084  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1085 
1086  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1087  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1088 
1089  if (!ost->filters_script &&
1090  !ost->filters &&
1091  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1092  next_picture &&
1093  ist &&
1094  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1095  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1096  }
1097 
1098  if (!next_picture) {
1099  //end, flushing
1100  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1101  ost->last_nb0_frames[1],
1102  ost->last_nb0_frames[2]);
1103  } else {
1104  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1105  delta = delta0 + duration;
1106 
1107  /* by default, we output a single frame */
1108  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1109  nb_frames = 1;
1110 
1111  format_video_sync = video_sync_method;
1112  if (format_video_sync == VSYNC_AUTO) {
1113  if(!strcmp(of->ctx->oformat->name, "avi")) {
1114  format_video_sync = VSYNC_VFR;
1115  } else
1116  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1117  if ( ist
1118  && format_video_sync == VSYNC_CFR
1119  && input_files[ist->file_index]->ctx->nb_streams == 1
1120  && input_files[ist->file_index]->input_ts_offset == 0) {
1121  format_video_sync = VSYNC_VSCFR;
1122  }
1123  if (format_video_sync == VSYNC_CFR && copy_ts) {
1124  format_video_sync = VSYNC_VSCFR;
1125  }
1126  }
1127  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1128 
1129  if (delta0 < 0 &&
1130  delta > 0 &&
1131  format_video_sync != VSYNC_PASSTHROUGH &&
1132  format_video_sync != VSYNC_DROP) {
1133  if (delta0 < -0.6) {
1134  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1135  } else
1136  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1137  sync_ipts = ost->sync_opts;
1138  duration += delta0;
1139  delta0 = 0;
1140  }
1141 
1142  switch (format_video_sync) {
1143  case VSYNC_VSCFR:
1144  if (ost->frame_number == 0 && delta0 >= 0.5) {
1145  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1146  delta = duration;
1147  delta0 = 0;
1148  ost->sync_opts = lrint(sync_ipts);
1149  }
1150  case VSYNC_CFR:
1151  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1152  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1153  nb_frames = 0;
1154  } else if (delta < -1.1)
1155  nb_frames = 0;
1156  else if (delta > 1.1) {
1157  nb_frames = lrintf(delta);
1158  if (delta0 > 1.1)
1159  nb0_frames = lrintf(delta0 - 0.6);
1160  }
1161  break;
1162  case VSYNC_VFR:
1163  if (delta <= -0.6)
1164  nb_frames = 0;
1165  else if (delta > 0.6)
1166  ost->sync_opts = lrint(sync_ipts);
1167  break;
1168  case VSYNC_DROP:
1169  case VSYNC_PASSTHROUGH:
1170  ost->sync_opts = lrint(sync_ipts);
1171  break;
1172  default:
1173  av_assert0(0);
1174  }
1175  }
1176 
1177  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1178  nb0_frames = FFMIN(nb0_frames, nb_frames);
1179 
1180  memmove(ost->last_nb0_frames + 1,
1181  ost->last_nb0_frames,
1182  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1183  ost->last_nb0_frames[0] = nb0_frames;
1184 
1185  if (nb0_frames == 0 && ost->last_dropped) {
1186  nb_frames_drop++;
1188  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1189  ost->frame_number, ost->st->index, ost->last_frame->pts);
1190  }
1191  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1192  if (nb_frames > dts_error_threshold * 30) {
1193  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1194  nb_frames_drop++;
1195  return;
1196  }
1197  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1198  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1199  if (nb_frames_dup > dup_warning) {
1200  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1201  dup_warning *= 10;
1202  }
1203  }
1204  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1205 
1206  /* duplicates frame if needed */
1207  for (i = 0; i < nb_frames; i++) {
1208  AVFrame *in_picture;
1209  int forced_keyframe = 0;
1210  double pts_time;
1211  av_init_packet(&pkt);
1212  pkt.data = NULL;
1213  pkt.size = 0;
1214 
1215  if (i < nb0_frames && ost->last_frame) {
1216  in_picture = ost->last_frame;
1217  } else
1218  in_picture = next_picture;
1219 
1220  if (!in_picture)
1221  return;
1222 
1223  in_picture->pts = ost->sync_opts;
1224 
1225  if (!check_recording_time(ost))
1226  return;
1227 
1229  ost->top_field_first >= 0)
1230  in_picture->top_field_first = !!ost->top_field_first;
1231 
1232  if (in_picture->interlaced_frame) {
1233  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1234  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1235  else
1236  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1237  } else
1238  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1239 
1240  in_picture->quality = enc->global_quality;
1241  in_picture->pict_type = 0;
1242 
1243  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1244  in_picture->pts != AV_NOPTS_VALUE)
1245  ost->forced_kf_ref_pts = in_picture->pts;
1246 
1247  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1248  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1249  if (ost->forced_kf_index < ost->forced_kf_count &&
1250  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1251  ost->forced_kf_index++;
1252  forced_keyframe = 1;
1253  } else if (ost->forced_keyframes_pexpr) {
1254  double res;
1255  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1258  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1264  res);
1265  if (res) {
1266  forced_keyframe = 1;
1272  }
1273 
1275  } else if ( ost->forced_keyframes
1276  && !strncmp(ost->forced_keyframes, "source", 6)
1277  && in_picture->key_frame==1
1278  && !i) {
1279  forced_keyframe = 1;
1280  }
1281 
1282  if (forced_keyframe) {
1283  in_picture->pict_type = AV_PICTURE_TYPE_I;
1284  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1285  }
1286 
1288  if (debug_ts) {
1289  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1290  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1291  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1292  enc->time_base.num, enc->time_base.den);
1293  }
1294 
1295  ost->frames_encoded++;
1296 
1297  ret = avcodec_send_frame(enc, in_picture);
1298  if (ret < 0)
1299  goto error;
1300  // Make sure Closed Captions will not be duplicated
1302 
1303  while (1) {
1304  ret = avcodec_receive_packet(enc, &pkt);
1305  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1306  if (ret == AVERROR(EAGAIN))
1307  break;
1308  if (ret < 0)
1309  goto error;
1310 
1311  if (debug_ts) {
1312  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1313  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1314  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1315  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1316  }
1317 
1318  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1319  pkt.pts = ost->sync_opts;
1320 
1321  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1322 
1323  if (debug_ts) {
1324  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1325  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1326  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1327  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1328  }
1329 
1330  frame_size = pkt.size;
1331  output_packet(of, &pkt, ost, 0);
1332 
1333  /* if two pass, output log */
1334  if (ost->logfile && enc->stats_out) {
1335  fprintf(ost->logfile, "%s", enc->stats_out);
1336  }
1337  }
1338  ost->sync_opts++;
1339  /*
1340  * For video, number of frames in == number of packets out.
1341  * But there may be reordering, so we can't throw away frames on encoder
1342  * flush, we need to limit them here, before they go into encoder.
1343  */
1344  ost->frame_number++;
1345 
1346  if (vstats_filename && frame_size)
1347  do_video_stats(ost, frame_size);
1348  }
1349 
1350  if (!ost->last_frame)
1351  ost->last_frame = av_frame_alloc();
1352  av_frame_unref(ost->last_frame);
1353  if (next_picture && ost->last_frame)
1354  av_frame_ref(ost->last_frame, next_picture);
1355  else
1356  av_frame_free(&ost->last_frame);
1357 
1358  return;
1359 error:
1360  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1361  exit_program(1);
1362 }
1363 
1364 static double psnr(double d)
1365 {
1366  return -10.0 * log10(d);
1367 }
1368 
/* Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, frame size, instantaneous and average bitrate, picture type)
 * to the file named by -vstats_file. Opens the stats file lazily on first call.
 * NOTE(review): the extraction dropped the signature line (doc line 1369)
 * immediately above this brace — the body uses an OutputStream *ost and an
 * int frame_size; confirm the exact parameter list against upstream. */
1370 {
1371  AVCodecContext *enc;
1372  int frame_number;
1373  double ti1, bitrate, avg_bitrate;
1374 
1375  /* this is executed just the first time do_video_stats is called */
1376  if (!vstats_file) {
1377  vstats_file = fopen(vstats_filename, "w");
1378  if (!vstats_file) {
1379  perror("fopen");
1380  exit_program(1);
1381  }
1382  }
1383 
1384  enc = ost->enc_ctx;
1385  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1386  frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy and the extended line format */
1387  if (vstats_version <= 1) {
1388  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1389  ost->quality / (float)FF_QP2LAMBDA);
1390  } else {
1391  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1392  ost->quality / (float)FF_QP2LAMBDA);
1393  }
1394 
/* PSNR is only printed when the encoder collected error stats (FLAG_PSNR) */
1395  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1396  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1397 
1398  fprintf(vstats_file,"f_size= %6d ", frame_size);
1399  /* compute pts value */
1400  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp so the average-bitrate division below never divides by ~0 */
1401  if (ti1 < 0.01)
1402  ti1 = 0.01;
1403 
/* instantaneous bitrate: this frame's bits over one encoder time_base tick */
1404  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1405  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1406  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1407  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1408  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1409  }
1410 }
1411 
1412 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1413 
/* Finish an output stream; when the output file was opened with -shortest,
 * finishing one stream marks every stream of that file as fully finished
 * (encoder and muxer) so the whole file stops together.
 * NOTE(review): the extraction dropped the signature (doc line 1414 — the
 * body uses an OutputStream *ost) and doc line 1419, presumably the
 * statement that sets ost->finished itself; verify against upstream. */
1415 {
1416  OutputFile *of = output_files[ost->file_index];
1417  int i;
1418 
1420 
1421  if (of->shortest) {
1422  for (i = 0; i < of->ctx->nb_streams; i++)
1423  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1424  }
1425 }
1426 
1427 /**
1428  * Get and encode new output from any of the filtergraphs, without causing
1429  * activity.
1430  *
1431  * @return 0 for success, <0 for severe errors
1432  */
/* Drain every configured buffersink: pull all currently-available frames and
 * hand them to the video/audio encoders. With flush!=0, an EOF from a video
 * sink triggers a final do_video_out(NULL) to flush frame duplication state.
 * Returns 0 on success, AVERROR(ENOMEM) if the reusable frame cannot be
 * allocated; hard initialization errors terminate the program.
 * NOTE(review): the extraction dropped doc lines 1442 (presumably the
 * AVFilterContext *filter declaration used below), 1468 (the flags argument
 * of av_buffersink_get_frame_flags) and 1471/1474/1519 (av_log call heads
 * whose message strings remain); verify against upstream. */
1433 static int reap_filters(int flush)
1434 {
1435  AVFrame *filtered_frame = NULL;
1436  int i;
1437 
1438  /* Reap all buffers present in the buffer sinks */
1439  for (i = 0; i < nb_output_streams; i++) {
1440  OutputStream *ost = output_streams[i];
1441  OutputFile *of = output_files[ost->file_index];
1443  AVCodecContext *enc = ost->enc_ctx;
1444  int ret = 0;
1445 
/* only streams fed by a fully configured filtergraph can be reaped */
1446  if (!ost->filter || !ost->filter->graph->graph)
1447  continue;
1448  filter = ost->filter->filter;
1449 
/* lazy init: the encoder/muxer for this stream may not be set up yet */
1450  if (!ost->initialized) {
1451  char error[1024] = "";
1452  ret = init_output_stream(ost, error, sizeof(error));
1453  if (ret < 0) {
1454  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1455  ost->file_index, ost->index, error);
1456  exit_program(1);
1457  }
1458  }
1459 
/* one reusable frame per output stream, allocated on first use */
1460  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1461  return AVERROR(ENOMEM);
1462  }
1463  filtered_frame = ost->filtered_frame;
1464 
1465  while (1) {
1466  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1467  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1469  if (ret < 0) {
1470  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1472  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1473  } else if (flush && ret == AVERROR_EOF) {
/* flush the video path (frame duplication state) on sink EOF */
1475  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1476  }
1477  break;
1478  }
1479  if (ost->finished) {
1480  av_frame_unref(filtered_frame);
1481  continue;
1482  }
1483  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1484  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1485  AVRational filter_tb = av_buffersink_get_time_base(filter);
1486  AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps fractional precision */
1487  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1488 
1489  tb.den <<= extra_bits;
1490  float_pts =
1491  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1492  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1493  float_pts /= 1 << extra_bits;
1494  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1495  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1496 
1497  filtered_frame->pts =
1498  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1499  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1500  }
1501 
1502  switch (av_buffersink_get_type(filter)) {
1503  case AVMEDIA_TYPE_VIDEO:
/* unless the user forced an aspect ratio, take it from the filter output */
1504  if (!ost->frame_aspect_ratio.num)
1505  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1506 
1507  if (debug_ts) {
1508  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1509  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1510  float_pts,
1511  enc->time_base.num, enc->time_base.den);
1512  }
1513 
1514  do_video_out(of, ost, filtered_frame, float_pts);
1515  break;
1516  case AVMEDIA_TYPE_AUDIO:
1517  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1518  enc->channels != filtered_frame->channels) {
1520  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1521  break;
1522  }
1523  do_audio_out(of, ost, filtered_frame);
1524  break;
1525  default:
1526  // TODO support subtitle filters
1527  av_assert0(0);
1528  }
1529 
1530  av_frame_unref(filtered_frame);
1531  }
1532  }
1533 
1534  return 0;
1535 }
1536 
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then detailed per-input/per-output stream packet
 * and frame counts at VERBOSE level. Warns when nothing was encoded at all.
 * NOTE(review): the extraction dropped doc lines 1556-1557 inside the first
 * loop — presumably the condition guarding "pass1_used = 0" (upstream checks
 * the encoder's two-pass flags); verify against the original source. */
1537 static void print_final_stats(int64_t total_size)
1538 {
1539  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1540  uint64_t subtitle_size = 0;
1541  uint64_t data_size = 0;
1542  float percent = -1.0;
1543  int i, j;
1544  int pass1_used = 1;
1545 
/* accumulate output payload bytes per media type, plus global-header bytes */
1546  for (i = 0; i < nb_output_streams; i++) {
1547  OutputStream *ost = output_streams[i];
1548  switch (ost->enc_ctx->codec_type) {
1549  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1550  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1551  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1552  default: other_size += ost->data_size; break;
1553  }
1554  extra_size += ost->enc_ctx->extradata_size;
1555  data_size += ost->data_size;
1558  pass1_used = 0;
1559  }
1560 
/* muxing overhead as a percentage of the raw payload size */
1561  if (data_size && total_size>0 && total_size >= data_size)
1562  percent = 100.0 * (total_size - data_size) / data_size;
1563 
1564  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1565  video_size / 1024.0,
1566  audio_size / 1024.0,
1567  subtitle_size / 1024.0,
1568  other_size / 1024.0,
1569  extra_size / 1024.0);
1570  if (percent >= 0.0)
1571  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1572  else
1573  av_log(NULL, AV_LOG_INFO, "unknown");
1574  av_log(NULL, AV_LOG_INFO, "\n");
1575 
1576  /* print verbose per-stream stats */
1577  for (i = 0; i < nb_input_files; i++) {
1578  InputFile *f = input_files[i];
1579  uint64_t total_packets = 0, total_size = 0;
1580 
1581  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1582  i, f->ctx->url);
1583 
1584  for (j = 0; j < f->nb_streams; j++) {
1585  InputStream *ist = input_streams[f->ist_index + j];
1586  enum AVMediaType type = ist->dec_ctx->codec_type;
1587 
1588  total_size += ist->data_size;
1589  total_packets += ist->nb_packets;
1590 
1591  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1592  i, j, media_type_string(type));
1593  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1594  ist->nb_packets, ist->data_size);
1595 
1596  if (ist->decoding_needed) {
1597  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1598  ist->frames_decoded);
1599  if (type == AVMEDIA_TYPE_AUDIO)
1600  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1601  av_log(NULL, AV_LOG_VERBOSE, "; ");
1602  }
1603 
1604  av_log(NULL, AV_LOG_VERBOSE, "\n");
1605  }
1606 
1607  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1608  total_packets, total_size);
1609  }
1610 
1611  for (i = 0; i < nb_output_files; i++) {
1612  OutputFile *of = output_files[i];
1613  uint64_t total_packets = 0, total_size = 0;
1614 
1615  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1616  i, of->ctx->url);
1617 
1618  for (j = 0; j < of->ctx->nb_streams; j++) {
1619  OutputStream *ost = output_streams[of->ost_index + j];
1620  enum AVMediaType type = ost->enc_ctx->codec_type;
1621 
1622  total_size += ost->data_size;
1623  total_packets += ost->packets_written;
1624 
1625  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1626  i, j, media_type_string(type));
1627  if (ost->encoding_needed) {
1628  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1629  ost->frames_encoded);
1630  if (type == AVMEDIA_TYPE_AUDIO)
1631  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1632  av_log(NULL, AV_LOG_VERBOSE, "; ");
1633  }
1634 
1635  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1636  ost->packets_written, ost->data_size);
1637 
1638  av_log(NULL, AV_LOG_VERBOSE, "\n");
1639  }
1640 
1641  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1642  total_packets, total_size);
1643  }
/* nothing at all was written: hint at -ss/-t/-frames unless this was pass 1 */
1644  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1645  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1646  if (pass1_used) {
1647  av_log(NULL, AV_LOG_WARNING, "\n");
1648  } else {
1649  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1650  }
1651  }
1652 }
1653 
/* Emit the periodic (at most every 500ms) or final status line: frame count,
 * fps, quantizer, optional QP histogram and PSNR, size, time, bitrate,
 * dup/drop counts and speed. Two buffers are built in parallel: "buf" is the
 * human-readable stderr line, "buf_script" is the key=value stream written to
 * -progress. On the last report it also triggers print_final_stats().
 * NOTE(review): the extraction dropped doc lines 1695 (presumably the
 * av_bprint_init of "buf", mirroring line 1696), 1763 (a condition guarding
 * the pts update — upstream checks av_stream_get_end_pts != AV_NOPTS_VALUE),
 * 1811 and 1844 (an av_log call head before the string on 1845); verify
 * against the original source. */
1654 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1655 {
1656  AVBPrint buf, buf_script;
1657  OutputStream *ost;
1658  AVFormatContext *oc;
1659  int64_t total_size;
1660  AVCodecContext *enc;
1661  int frame_number, vid, i;
1662  double bitrate;
1663  double speed;
1664  int64_t pts = INT64_MIN + 1;
1665  static int64_t last_time = -1;
1666  static int qp_histogram[52];
1667  int hours, mins, secs, us;
1668  const char *hours_sign;
1669  int ret;
1670  float t;
1671 
1672  if (!print_stats && !is_last_report && !progress_avio)
1673  return;
1674 
/* rate-limit intermediate reports to one every 500ms of wall-clock time */
1675  if (!is_last_report) {
1676  if (last_time == -1) {
1677  last_time = cur_time;
1678  return;
1679  }
1680  if ((cur_time - last_time) < 500000)
1681  return;
1682  last_time = cur_time;
1683  }
1684 
1685  t = (cur_time-timer_start) / 1000000.0;
1686 
1687 
1688  oc = output_files[0]->ctx;
1689 
1690  total_size = avio_size(oc->pb);
1691  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1692  total_size = avio_tell(oc->pb);
1693 
1694  vid = 0;
1696  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1697  for (i = 0; i < nb_output_streams; i++) {
1698  float q = -1;
1699  ost = output_streams[i];
1700  enc = ost->enc_ctx;
1701  if (!ost->stream_copy)
1702  q = ost->quality / (float) FF_QP2LAMBDA;
1703 
/* secondary video streams only contribute their quantizer */
1704  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1705  av_bprintf(&buf, "q=%2.1f ", q);
1706  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1707  ost->file_index, ost->index, q);
1708  }
/* first video stream drives frame count, fps, QP histogram and PSNR */
1709  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1710  float fps;
1711 
1712  frame_number = ost->frame_number;
1713  fps = t > 1 ? frame_number / t : 0;
1714  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1715  frame_number, fps < 9.95, fps, q);
1716  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1717  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1718  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1719  ost->file_index, ost->index, q);
1720  if (is_last_report)
1721  av_bprintf(&buf, "L");
1722  if (qp_hist) {
1723  int j;
1724  int qp = lrintf(q);
1725  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1726  qp_histogram[qp]++;
1727  for (j = 0; j < 32; j++)
1728  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1729  }
1730 
1731  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1732  int j;
1733  double error, error_sum = 0;
1734  double scale, scale_sum = 0;
1735  double p;
1736  char type[3] = { 'Y','U','V' };
1737  av_bprintf(&buf, "PSNR=");
1738  for (j = 0; j < 3; j++) {
/* last report: cumulative encoder error; otherwise per-frame error */
1739  if (is_last_report) {
1740  error = enc->error[j];
1741  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1742  } else {
1743  error = ost->error[j];
1744  scale = enc->width * enc->height * 255.0 * 255.0;
1745  }
/* chroma planes are quarter-sized */
1746  if (j)
1747  scale /= 4;
1748  error_sum += error;
1749  scale_sum += scale;
1750  p = psnr(error / scale);
1751  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1752  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1753  ost->file_index, ost->index, type[j] | 32, p);
1754  }
1755  p = psnr(error_sum / scale_sum);
1756  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1757  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1758  ost->file_index, ost->index, p);
1759  }
1760  vid = 1;
1761  }
1762  /* compute min output value */
1764  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1765  ost->st->time_base, AV_TIME_BASE_Q));
1766  if (is_last_report)
1767  nb_frames_drop += ost->last_dropped;
1768  }
1769 
/* split |pts| (in AV_TIME_BASE units) into h:m:s.us for display */
1770  secs = FFABS(pts) / AV_TIME_BASE;
1771  us = FFABS(pts) % AV_TIME_BASE;
1772  mins = secs / 60;
1773  secs %= 60;
1774  hours = mins / 60;
1775  mins %= 60;
1776  hours_sign = (pts < 0) ? "-" : "";
1777 
1778  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1779  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1780 
1781  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1782  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1783  if (pts == AV_NOPTS_VALUE) {
1784  av_bprintf(&buf, "N/A ");
1785  } else {
1786  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1787  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1788  }
1789 
1790  if (bitrate < 0) {
1791  av_bprintf(&buf, "bitrate=N/A");
1792  av_bprintf(&buf_script, "bitrate=N/A\n");
1793  }else{
1794  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1795  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1796  }
1797 
1798  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1799  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1800  if (pts == AV_NOPTS_VALUE) {
1801  av_bprintf(&buf_script, "out_time_us=N/A\n");
1802  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1803  av_bprintf(&buf_script, "out_time=N/A\n");
1804  } else {
1805  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1806  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1807  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1808  hours_sign, hours, mins, secs, us);
1809  }
1810 
1812  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1813  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1814  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1815 
1816  if (speed < 0) {
1817  av_bprintf(&buf, " speed=N/A");
1818  av_bprintf(&buf_script, "speed=N/A\n");
1819  } else {
1820  av_bprintf(&buf, " speed=%4.3gx", speed);
1821  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1822  }
1823 
/* '\r' keeps intermediate reports on one stderr line; '\n' ends the last */
1824  if (print_stats || is_last_report) {
1825  const char end = is_last_report ? '\n' : '\r';
1826  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1827  fprintf(stderr, "%s %c", buf.str, end);
1828  } else
1829  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1830 
1831  fflush(stderr);
1832  }
1833  av_bprint_finalize(&buf, NULL);
1834 
1835  if (progress_avio) {
1836  av_bprintf(&buf_script, "progress=%s\n",
1837  is_last_report ? "end" : "continue");
1838  avio_write(progress_avio, buf_script.str,
1839  FFMIN(buf_script.len, buf_script.size - 1));
1840  avio_flush(progress_avio);
1841  av_bprint_finalize(&buf_script, NULL);
1842  if (is_last_report) {
1843  if ((ret = avio_closep(&progress_avio)) < 0)
1845  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1846  }
1847  }
1848 
1849  if (is_last_report)
1850  print_final_stats(total_size);
1851 }
1852 
/* Populate a filtergraph input's media parameters (format, audio rate/
 * channels/layout, video dimensions, SAR) from the stream's codec parameters,
 * used when no decoded frame ever arrived to provide real values.
 * NOTE(review): the extraction dropped the signature line (doc line 1853);
 * the body uses an InputFilter *ifilter and an AVCodecParameters *par —
 * confirm against upstream. */
1854 {
1855  // We never got any input. Set a fake format, which will
1856  // come from libavformat.
1857  ifilter->format = par->format;
1858  ifilter->sample_rate = par->sample_rate;
1859  ifilter->channels = par->channels;
1860  ifilter->channel_layout = par->channel_layout;
1861  ifilter->width = par->width;
1862  ifilter->height = par->height;
1863  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1864 }
1865 
/* At end of input, drain every encoder: streams that were never initialized
 * are given codecpar-derived filter parameters and set up first, then each
 * encoder is sent a NULL frame and its remaining packets are received,
 * rescaled to the muxer timebase and written out until AVERROR_EOF.
 * NOTE(review): the extraction dropped several doc lines — 1884 (an av_log
 * call head before the "Finishing stream" string), 1896 (the condition whose
 * body is the bare "continue" on 1897), 1919 (a condition guarding another
 * "continue"), 1942 and 1975 (presumably the condition guarding the
 * do_video_stats call). Verify each against the original source. */
1866 static void flush_encoders(void)
1867 {
1868  int i, ret;
1869 
1870  for (i = 0; i < nb_output_streams; i++) {
1871  OutputStream *ost = output_streams[i];
1872  AVCodecContext *enc = ost->enc_ctx;
1873  OutputFile *of = output_files[ost->file_index];
1874 
1875  if (!ost->encoding_needed)
1876  continue;
1877 
1878  // Try to enable encoding with no input frames.
1879  // Maybe we should just let encoding fail instead.
1880  if (!ost->initialized) {
1881  FilterGraph *fg = ost->filter->graph;
1882  char error[1024] = "";
1883 
1885  "Finishing stream %d:%d without any data written to it.\n",
1886  ost->file_index, ost->st->index);
1887 
/* graph never configured: backfill input params from codecpar, then build it */
1888  if (ost->filter && !fg->graph) {
1889  int x;
1890  for (x = 0; x < fg->nb_inputs; x++) {
1891  InputFilter *ifilter = fg->inputs[x];
1892  if (ifilter->format < 0)
1893  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1894  }
1895 
1897  continue;
1898 
1899  ret = configure_filtergraph(fg);
1900  if (ret < 0) {
1901  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1902  exit_program(1);
1903  }
1904 
1905  finish_output_stream(ost);
1906  }
1907 
1908  ret = init_output_stream(ost, error, sizeof(error));
1909  if (ret < 0) {
1910  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1911  ost->file_index, ost->index, error);
1912  exit_program(1);
1913  }
1914  }
1915 
/* raw/PCM-style audio (frame_size <= 1) has nothing buffered to flush */
1916  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1917  continue;
1918 
1920  continue;
1921 
1922  for (;;) {
1923  const char *desc = NULL;
1924  AVPacket pkt;
1925  int pkt_size;
1926 
1927  switch (enc->codec_type) {
1928  case AVMEDIA_TYPE_AUDIO:
1929  desc = "audio";
1930  break;
1931  case AVMEDIA_TYPE_VIDEO:
1932  desc = "video";
1933  break;
1934  default:
1935  av_assert0(0);
1936  }
1937 
1938  av_init_packet(&pkt);
1939  pkt.data = NULL;
1940  pkt.size = 0;
1941 
1943 
/* send_frame(NULL) signals EOF; loop until receive_packet stops EAGAIN-ing */
1944  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1945  ret = avcodec_send_frame(enc, NULL);
1946  if (ret < 0) {
1947  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1948  desc,
1949  av_err2str(ret));
1950  exit_program(1);
1951  }
1952  }
1953 
1954  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1955  if (ret < 0 && ret != AVERROR_EOF) {
1956  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1957  desc,
1958  av_err2str(ret));
1959  exit_program(1);
1960  }
1961  if (ost->logfile && enc->stats_out) {
1962  fprintf(ost->logfile, "%s", enc->stats_out);
1963  }
/* EOF: emit the final (flush) packet to the muxer and stop this stream */
1964  if (ret == AVERROR_EOF) {
1965  output_packet(of, &pkt, ost, 1);
1966  break;
1967  }
1968  if (ost->finished & MUXER_FINISHED) {
1969  av_packet_unref(&pkt);
1970  continue;
1971  }
1972  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1973  pkt_size = pkt.size;
1974  output_packet(of, &pkt, ost, 0);
1976  do_video_stats(ost, pkt_size);
1977  }
1978  }
1979  }
1980 }
1981 
1982 /*
1983  * Check whether a packet from ist should be written into ost at this time
1984  */
1986 {
1987  OutputFile *of = output_files[ost->file_index];
1988  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1989 
1990  if (ost->source_index != ist_index)
1991  return 0;
1992 
1993  if (ost->finished)
1994  return 0;
1995 
1996  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1997  return 0;
1998 
1999  return 1;
2000 }
2001 
/* Stream-copy path: forward an input packet to the output unchanged except
 * for timestamp rescaling into the muxer timebase (offset by the output's
 * start time). A NULL pkt flushes the output bitstream filters. Also applies
 * the initial-keyframe gate, start/recording-time windows, and closes the
 * stream once the recording time is exceeded.
 * NOTE(review): the extraction dropped doc lines 2020 (the second half of the
 * condition started on 2019 — presumably the copy_initial_nonkeyframes
 * check), 2062 (the declaration/initialization of "duration" used on 2063)
 * and 2067 (the trailing arguments of the av_rescale_delta call on
 * 2065-2066). Verify each against the original source. */
2002 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2003 {
2004  OutputFile *of = output_files[ost->file_index];
2005  InputFile *f = input_files [ist->file_index];
2006  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2007  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2008  AVPacket opkt;
2009 
2010  // EOF: flush output bitstream filters.
2011  if (!pkt) {
2012  av_init_packet(&opkt);
2013  opkt.data = NULL;
2014  opkt.size = 0;
2015  output_packet(of, &opkt, ost, 1);
2016  return;
2017  }
2018 
/* drop leading non-keyframe packets (condition continues on dropped line 2020) */
2019  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2021  return;
2022 
/* before the first packet: drop anything earlier than the effective start */
2023  if (!ost->frame_number && !ost->copy_prior_start) {
2024  int64_t comp_start = start_time;
2025  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2026  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2027  if (pkt->pts == AV_NOPTS_VALUE ?
2028  ist->pts < comp_start :
2029  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2030  return;
2031  }
2032 
2033  if (of->recording_time != INT64_MAX &&
2034  ist->pts >= of->recording_time + start_time) {
2035  close_output_stream(ost);
2036  return;
2037  }
2038 
2039  if (f->recording_time != INT64_MAX) {
2040  start_time = f->ctx->start_time;
2041  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2042  start_time += f->start_time;
2043  if (ist->pts >= f->recording_time + start_time) {
2044  close_output_stream(ost);
2045  return;
2046  }
2047  }
2048 
2049  /* force the input stream PTS */
2050  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2051  ost->sync_opts++;
2052 
2053  if (av_packet_ref(&opkt, pkt) < 0)
2054  exit_program(1);
2055 
2056  if (pkt->pts != AV_NOPTS_VALUE)
2057  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2058 
2059  if (pkt->dts == AV_NOPTS_VALUE) {
2060  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2061  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2063  if(!duration)
2064  duration = ist->dec_ctx->frame_size;
2065  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2066  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2068  /* dts will be set immediately afterwards to what pts is now */
2069  opkt.pts = opkt.dts - ost_tb_start_time;
2070  } else
2071  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2072  opkt.dts -= ost_tb_start_time;
2073 
2074  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2075 
2076  output_packet(of, &opkt, ost, 0);
2077 }
2078 
/* If the decoder reported no channel layout, try to pick one (bounded by
 * -guess_layout_max channels) and warn the user about the guess.
 * Returns 1 when a layout is known/guessed, 0 when no layout could be set.
 * NOTE(review): the extraction dropped the signature (doc line 2079 — the
 * body uses an InputStream *ist) and doc line 2088, presumably the statement
 * that actually assigns the guessed dec->channel_layout (the very next line
 * re-checks it); verify against upstream. */
2080 {
2081  AVCodecContext *dec = ist->dec_ctx;
2082 
2083  if (!dec->channel_layout) {
2084  char layout_name[256];
2085 
/* refuse to guess above the user-configured channel-count ceiling */
2086  if (dec->channels > ist->guess_layout_max)
2087  return 0;
2089  if (!dec->channel_layout)
2090  return 0;
2091  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2092  dec->channels, dec->channel_layout);
2093  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2094  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2095  }
2096  return 1;
2097 }
2098 
/* Book-keeping after a decode attempt: count successes/failures in
 * decode_error_stat, and honor -xerror by aborting on a decode error or
 * (when ist is known) on a corrupt decoded frame.
 * NOTE(review): the extraction dropped doc lines 2108-2109 inside the final
 * block — presumably the corrupt-frame condition and the av_log call head
 * whose message string remains on 2110; verify against upstream. */
2099 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2100 {
/* decode_error_stat[0] counts frames produced, [1] counts failed decodes */
2101  if (*got_output || ret<0)
2102  decode_error_stat[ret<0] ++;
2103 
2104  if (ret < 0 && exit_on_error)
2105  exit_program(1);
2106 
2107  if (*got_output && ist) {
2110  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2111  if (exit_on_error)
2112  exit_program(1);
2113  }
2114  }
2115 }
2116 
2117 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the graph has a known format
 * (format >= 0), 0 if any is still undetermined.
 * NOTE(review): the extraction dropped the signature line (doc line 2118);
 * the body uses a FilterGraph *fg — confirm against upstream. */
2119 {
2120  int i;
2121  for (i = 0; i < fg->nb_inputs; i++) {
2122  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2123  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2124  return 0;
2125  }
2126  return 1;
2127 }
2128 
/* Feed a decoded frame into a filtergraph input. Detects parameter changes
 * (format, audio rate/channels/layout, video dimensions, hw frames context)
 * and reinitializes the graph when needed; frames arriving before all graph
 * inputs have known formats are cloned and queued instead of being sent.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): the extraction dropped the signature (doc line 2129 — the
 * body uses InputFilter *ifilter and AVFrame *frame) and doc line 2196,
 * presumably the av_buffersrc call whose result "ret" is checked on 2197;
 * verify both against upstream. */
2130 {
2131  FilterGraph *fg = ifilter->graph;
2132  int need_reinit, ret, i;
2133 
2134  /* determine if the parameters for this input changed */
2135  need_reinit = ifilter->format != frame->format;
2136 
2137  switch (ifilter->ist->st->codecpar->codec_type) {
2138  case AVMEDIA_TYPE_AUDIO:
2139  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2140  ifilter->channels != frame->channels ||
2141  ifilter->channel_layout != frame->channel_layout;
2142  break;
2143  case AVMEDIA_TYPE_VIDEO:
2144  need_reinit |= ifilter->width != frame->width ||
2145  ifilter->height != frame->height;
2146  break;
2147  }
2148 
/* -reinit_filter 0: keep the existing graph despite parameter changes */
2149  if (!ifilter->ist->reinit_filters && fg->graph)
2150  need_reinit = 0;
2151 
/* hw frames context appearing/disappearing or changing also forces reinit */
2152  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2153  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2154  need_reinit = 1;
2155 
2156  if (need_reinit) {
2157  ret = ifilter_parameters_from_frame(ifilter, frame);
2158  if (ret < 0)
2159  return ret;
2160  }
2161 
2162  /* (re)init the graph if possible, otherwise buffer the frame and return */
2163  if (need_reinit || !fg->graph) {
2164  for (i = 0; i < fg->nb_inputs; i++) {
/* not all input formats known yet: clone and queue the frame for later */
2165  if (!ifilter_has_all_input_formats(fg)) {
2166  AVFrame *tmp = av_frame_clone(frame);
2167  if (!tmp)
2168  return AVERROR(ENOMEM);
2169  av_frame_unref(frame);
2170 
/* grow the FIFO (doubling) when it is full before writing the clone */
2171  if (!av_fifo_space(ifilter->frame_queue)) {
2172  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2173  if (ret < 0) {
2174  av_frame_free(&tmp);
2175  return ret;
2176  }
2177  }
2178  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2179  return 0;
2180  }
2181  }
2182 
/* flush frames still buffered in the old graph before rebuilding it */
2183  ret = reap_filters(1);
2184  if (ret < 0 && ret != AVERROR_EOF) {
2185  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2186  return ret;
2187  }
2188 
2189  ret = configure_filtergraph(fg);
2190  if (ret < 0) {
2191  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2192  return ret;
2193  }
2194  }
2195 
2197  if (ret < 0) {
2198  if (ret != AVERROR_EOF)
2199  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2200  return ret;
2201  }
2202 
2203  return 0;
2204 }
2205 
2206 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2207 {
2208  int ret;
2209 
2210  ifilter->eof = 1;
2211 
2212  if (ifilter->filter) {
2213  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2214  if (ret < 0)
2215  return ret;
2216  } else {
2217  // the filtergraph was never configured
2218  if (ifilter->format < 0)
2219  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2220  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2221  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2222  return AVERROR_INVALIDDATA;
2223  }
2224  }
2225 
2226  return 0;
2227 }
2228 
2229 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2230 // There is the following difference: if you got a frame, you must call
2231 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2232 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2233 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2234 {
2235  int ret;
2236 
2237  *got_frame = 0;
2238 
2239  if (pkt) {
2240  ret = avcodec_send_packet(avctx, pkt);
2241  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2242  // decoded frames with avcodec_receive_frame() until done.
2243  if (ret < 0 && ret != AVERROR_EOF)
2244  return ret;
2245  }
2246 
2247  ret = avcodec_receive_frame(avctx, frame);
2248  if (ret < 0 && ret != AVERROR(EAGAIN))
2249  return ret;
2250  if (ret >= 0)
2251  *got_frame = 1;
2252 
2253  return 0;
2254 }
2255 
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream.  All but the last input receive a reference; the last consumes
 * the frame itself.  Returns 0 on success or a negative AVERROR code. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int i, ret;
    AVFrame *f;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            /* not the last consumer: hand out a new reference */
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
            if (ret < 0)
                break;
        } else
            f = decoded_frame;
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            /* NOTE(review): the opening of the av_log() call appears to have
             * been lost in extraction here — verify against upstream. */
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2281 
/* Decode one audio packet (or drain with pkt == NULL), fix up timestamps,
 * and forward the decoded frame to the filtergraphs.
 * *got_output is set when a frame was produced; *decode_failed flags a
 * decoder (as opposed to post-processing) failure.
 * Returns the decoder error, or the filter-injection error if that failed. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable frame buffers */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    /* a zero/negative sample rate would divide by zero below */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* pick a pts source (frame, packet, or stream dts) and its time base */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    } else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    /* rescale to a 1/sample_rate time base, accumulating rounding error
     * across calls via filter_in_rescale_delta_last */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    /* NOTE(review): an av_frame_unref(ist->filter_frame) line appears to
     * have been lost in extraction here — verify against upstream. */
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2343 
/* Decode one video packet (or drain with eof set), derive a best-effort
 * timestamp, run optional hwaccel frame retrieval, and forward the frame
 * to the filtergraphs.
 * *got_output is set when a frame was produced, *duration_pts receives the
 * frame's duration in stream time base, and *decode_failed flags a decoder
 * (as opposed to post-processing) failure. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* lazily allocate the reusable frame buffers */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the dts so it can serve as a fallback timestamp for
         * frames drained after EOF */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            /* NOTE(review): the opening of the av_log() call appears to have
             * been lost in extraction here — verify against upstream. */
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* purely diagnostic: report a mismatch between context and frame */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from hw memory when a retrieval hook is installed */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* under forced CFR, timestamps are simply sequential frame numbers */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* drained frames without a timestamp fall back to the buffered dts
     * values recorded at EOF (consumed FIFO-style) */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    /* NOTE(review): an av_frame_unref(ist->filter_frame) line appears to
     * have been lost in extraction here — verify against upstream. */
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2476 
/* Decode one subtitle packet, optionally fixing up durations
 * (-fix_sub_duration), routing the result to sub2video and/or all matching
 * subtitle encoders.  Returns the decoder's return code. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* an empty packet at EOF flushes the sub2video machinery */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        /* clamp the previous subtitle's display time to the gap between
         * the two subtitles (in ms) */
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       /* NOTE(review): the argument lines passing the old
                        * end_display_time and the assignment applying the new
                        * one appear to have been lost in extraction here —
                        * verify against upstream. */
                       end <= 0 ? ", dropping it" : "");
            }
        }
        /* delay output by one subtitle: emit the previous one now */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* no sub2video frame yet: queue the subtitle until one exists */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            /* NOTE(review): the av_fifo_realloc2() call growing the queue
             * appears to have been lost in extraction here — verify against
             * upstream. */
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0; /* ownership moved into the queue */
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* hand the subtitle to every output stream encoding from this input */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2553 
/* Send EOF (with the stream's final pts) to every filtergraph input fed by
 * this input stream.  Returns 0 on success or the first failing input's
 * error code.
 * NOTE(review): the signature line (presumably
 * `static int send_filter_eof(InputStream *ist)`) appears to have been
 * lost in extraction — confirm against upstream. */
{
    int i, ret;
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   /* NOTE(review): the rounding-flags
                                    * argument line appears to have been lost
                                    * in extraction here. */

    for (i = 0; i < ist->nb_filters; i++) {
        ret = ifilter_send_eof(ist->filters[i], pts);
        if (ret < 0)
            return ret;
    }
    return 0;
}
2568 
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2571 {
2572  int ret = 0, i;
2573  int repeating = 0;
2574  int eof_reached = 0;
2575 
2576  AVPacket avpkt;
2577  if (!ist->saw_first_ts) {
2578  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579  ist->pts = 0;
2580  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583  }
2584  ist->saw_first_ts = 1;
2585  }
2586 
2587  if (ist->next_dts == AV_NOPTS_VALUE)
2588  ist->next_dts = ist->dts;
2589  if (ist->next_pts == AV_NOPTS_VALUE)
2590  ist->next_pts = ist->pts;
2591 
2592  if (!pkt) {
2593  /* EOF handling */
2594  av_init_packet(&avpkt);
2595  avpkt.data = NULL;
2596  avpkt.size = 0;
2597  } else {
2598  avpkt = *pkt;
2599  }
2600 
2601  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604  ist->next_pts = ist->pts = ist->dts;
2605  }
2606 
2607  // while we have more to decode or while the decoder did output something on EOF
2608  while (ist->decoding_needed) {
2609  int64_t duration_dts = 0;
2610  int64_t duration_pts = 0;
2611  int got_output = 0;
2612  int decode_failed = 0;
2613 
2614  ist->pts = ist->next_pts;
2615  ist->dts = ist->next_dts;
2616 
2617  switch (ist->dec_ctx->codec_type) {
2618  case AVMEDIA_TYPE_AUDIO:
2619  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2620  &decode_failed);
2621  break;
2622  case AVMEDIA_TYPE_VIDEO:
2623  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2624  &decode_failed);
2625  if (!repeating || !pkt || got_output) {
2626  if (pkt && pkt->duration) {
2627  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2630  duration_dts = ((int64_t)AV_TIME_BASE *
2631  ist->dec_ctx->framerate.den * ticks) /
2633  }
2634 
2635  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636  ist->next_dts += duration_dts;
2637  }else
2638  ist->next_dts = AV_NOPTS_VALUE;
2639  }
2640 
2641  if (got_output) {
2642  if (duration_pts > 0) {
2643  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2644  } else {
2645  ist->next_pts += duration_dts;
2646  }
2647  }
2648  break;
2649  case AVMEDIA_TYPE_SUBTITLE:
2650  if (repeating)
2651  break;
2652  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653  if (!pkt && ret >= 0)
2654  ret = AVERROR_EOF;
2655  break;
2656  default:
2657  return -1;
2658  }
2659 
2660  if (ret == AVERROR_EOF) {
2661  eof_reached = 1;
2662  break;
2663  }
2664 
2665  if (ret < 0) {
2666  if (decode_failed) {
2667  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668  ist->file_index, ist->st->index, av_err2str(ret));
2669  } else {
2670  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672  }
2673  if (!decode_failed || exit_on_error)
2674  exit_program(1);
2675  break;
2676  }
2677 
2678  if (got_output)
2679  ist->got_output = 1;
2680 
2681  if (!got_output)
2682  break;
2683 
2684  // During draining, we might get multiple output frames in this loop.
2685  // ffmpeg.c does not drain the filter chain on configuration changes,
2686  // which means if we send multiple frames at once to the filters, and
2687  // one of those frames changes configuration, the buffered frames will
2688  // be lost. This can upset certain FATE tests.
2689  // Decode only 1 frame per call on EOF to appease these FATE tests.
2690  // The ideal solution would be to rewrite decoding to use the new
2691  // decoding API in a better way.
2692  if (!pkt)
2693  break;
2694 
2695  repeating = 1;
2696  }
2697 
2698  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699  /* except when looping we need to flush but not to send an EOF */
2700  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701  int ret = send_filter_eof(ist);
2702  if (ret < 0) {
2703  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2704  exit_program(1);
2705  }
2706  }
2707 
2708  /* handle stream copy */
2709  if (!ist->decoding_needed && pkt) {
2710  ist->dts = ist->next_dts;
2711  switch (ist->dec_ctx->codec_type) {
2712  case AVMEDIA_TYPE_AUDIO:
2713  av_assert1(pkt->duration >= 0);
2714  if (ist->dec_ctx->sample_rate) {
2715  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716  ist->dec_ctx->sample_rate;
2717  } else {
2718  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2719  }
2720  break;
2721  case AVMEDIA_TYPE_VIDEO:
2722  if (ist->framerate.num) {
2723  // TODO: Remove work-around for c99-to-c89 issue 7
2724  AVRational time_base_q = AV_TIME_BASE_Q;
2725  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727  } else if (pkt->duration) {
2728  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729  } else if(ist->dec_ctx->framerate.num != 0) {
2730  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731  ist->next_dts += ((int64_t)AV_TIME_BASE *
2732  ist->dec_ctx->framerate.den * ticks) /
2734  }
2735  break;
2736  }
2737  ist->pts = ist->dts;
2738  ist->next_pts = ist->next_dts;
2739  }
2740  for (i = 0; i < nb_output_streams; i++) {
2741  OutputStream *ost = output_streams[i];
2742 
2743  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2744  continue;
2745 
2746  do_streamcopy(ist, ost, pkt);
2747  }
2748 
2749  return !eof_reached;
2750 }
2751 
/* Build and emit an SDP description covering every RTP output.
 * Waits until all output headers are written; prints to stdout or, when
 * -sdp_file was given, writes to that file instead. */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    /* the SDP can only be generated once every muxer wrote its header */
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    /* collect only the RTP muxer contexts; j counts them */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_print(sdp_pb, sdp);
            avio_closep(&sdp_pb);
            /* NOTE(review): a line (presumably freeing sdp_filename) appears
             * to have been lost in extraction here — verify against
             * upstream. */
        }
    }

fail:
    av_freep(&avc);
}
2796 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first software format, or initialize the requested
 * hwaccel when a matching hardware format is offered.
 * NOTE(review): the signature line (presumably
 * `static enum AVPixelFormat get_format(AVCodecContext *s, const enum
 * AVPixelFormat *pix_fmts)`) appears to have been lost in extraction —
 * confirm against upstream. */
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        /* NOTE(review): the line fetching the AVPixFmtDescriptor for *p
         * appears to have been lost in extraction here. */
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* first non-hwaccel format in the list is the software fallback */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* search the decoder's hw configs for one matching this format */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      /* NOTE(review): the method-flag constant line appears
                       * to have been lost in extraction here. */
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* explicitly requested hwaccel failing is fatal for this
                 * stream; auto-selection just moves on */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    /* NOTE(review): the av_log() opening line and the
                     * hwaccel-name argument line appear to have been lost in
                     * extraction here. */
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* fall back to the legacy hwaccel table keyed by pixel format */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                /* NOTE(review): the av_log() opening line appears to have
                 * been lost in extraction here. */
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        if (ist->hw_frames_ctx) {
            /* NOTE(review): the line assigning a reference of
             * ist->hw_frames_ctx to s->hw_frames_ctx appears to have been
             * lost in extraction here. */
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2882 
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hardware frames, otherwise use the default allocator.
 * NOTE(review): the signature line (presumably
 * `static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)`)
 * appears to have been lost in extraction — confirm against upstream. */
{
    InputStream *ist = s->opaque;

    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);

    return avcodec_default_get_buffer2(s, frame, flags);
}
2892 
/* Prepare one input stream for use: install decoder callbacks, apply
 * per-stream decoder options, set up any hardware device, and open the
 * decoder.  On failure a human-readable message is written into `error`.
 * Returns 0 on success or a negative AVERROR code. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* wire the hwaccel-aware callbacks; opaque carries the ist back */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            /* NOTE(review): the condition line guarding this warning
             * (presumably checking DECODING_FOR_FILTER) appears to have been
             * lost in extraction here — verify against upstream. */
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        /* NOTE(review): the condition line (presumably checking
         * AV_DISPOSITION_ATTACHED_PIC) appears to have been lost in
         * extraction here — verify against upstream. */
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2957 
/* Return the input stream an output stream copies/encodes from, or NULL
 * when the output has no source stream (e.g. purely filter-fed).
 * NOTE(review): the signature line (presumably
 * `static InputStream *get_input_stream(OutputStream *ost)`) appears to
 * have been lost in extraction — confirm against upstream. */
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}
2964 
/* qsort comparator for int64_t values: negative/zero/positive for
 * less-than / equal / greater-than, without overflow from subtraction. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2969 
2970 /* open the muxer when all the streams are initialized */
/* open the muxer when all the streams are initialized */
/* Once every stream of the output file is initialized, write the muxer
 * header, optionally print the SDP, and flush packets that were queued
 * while the muxer was not yet ready.  Returns 0 on success or a negative
 * AVERROR code; returns 0 early (no-op) while streams are still pending. */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        /* NOTE(review): the av_log() opening line appears to have been lost
         * in extraction here — verify against upstream. */
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
3016 
/* Chain and initialize the output stream's bitstream filters, propagating
 * codec parameters and time base through the chain and back into the
 * stream.  Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the signature line (presumably
 * `static int init_output_bsfs(OutputStream *ost)`) appears to have been
 * lost in extraction — confirm against upstream. */
{
    AVBSFContext *ctx;
    int i, ret;

    if (!ost->nb_bitstream_filters)
        return 0;

    for (i = 0; i < ost->nb_bitstream_filters; i++) {
        ctx = ost->bsf_ctx[i];

        /* each filter's input is the previous filter's output (or the
         * stream itself for the first filter) */
        ret = avcodec_parameters_copy(ctx->par_in,
                                      i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
        if (ret < 0)
            return ret;

        ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;

        ret = av_bsf_init(ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
                   ost->bsf_ctx[i]->filter->name);
            return ret;
        }
    }

    /* the last filter's output parameters become the stream's */
    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3052 
3054 {
3055  OutputFile *of = output_files[ost->file_index];
3056  InputStream *ist = get_input_stream(ost);
3057  AVCodecParameters *par_dst = ost->st->codecpar;
3058  AVCodecParameters *par_src = ost->ref_par;
3059  AVRational sar;
3060  int i, ret;
3061  uint32_t codec_tag = par_dst->codec_tag;
3062 
3063  av_assert0(ist && !ost->filter);
3064 
3065  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3066  if (ret >= 0)
3067  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3068  if (ret < 0) {
3070  "Error setting up codec context options.\n");
3071  return ret;
3072  }
3073 
3074  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3075  if (ret < 0) {
3077  "Error getting reference codec parameters.\n");
3078  return ret;
3079  }
3080 
3081  if (!codec_tag) {
3082  unsigned int codec_tag_tmp;
3083  if (!of->ctx->oformat->codec_tag ||
3084  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3085  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3086  codec_tag = par_src->codec_tag;
3087  }
3088 
3089  ret = avcodec_parameters_copy(par_dst, par_src);
3090  if (ret < 0)
3091  return ret;
3092 
3093  par_dst->codec_tag = codec_tag;
3094 
3095  if (!ost->frame_rate.num)
3096  ost->frame_rate = ist->framerate;
3097  ost->st->avg_frame_rate = ost->frame_rate;
3098 
3100  if (ret < 0)
3101  return ret;
3102 
3103  // copy timebase while removing common factors
3104  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3106 
3107  // copy estimated duration as a hint to the muxer
3108  if (ost->st->duration <= 0 && ist->st->duration > 0)
3109  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3110 
3111  // copy disposition
3112  ost->st->disposition = ist->st->disposition;
3113 
3114  if (ist->st->nb_side_data) {
3115  for (i = 0; i < ist->st->nb_side_data; i++) {
3116  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3117  uint8_t *dst_data;
3118 
3119  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3120  if (!dst_data)
3121  return AVERROR(ENOMEM);
3122  memcpy(dst_data, sd_src->data, sd_src->size);
3123  }
3124  }
3125 
3126  if (ost->rotate_overridden) {
3128  sizeof(int32_t) * 9);
3129  if (sd)
3131  }
3132 
3133  switch (par_dst->codec_type) {
3134  case AVMEDIA_TYPE_AUDIO:
3135  if (audio_volume != 256) {
3136  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3137  exit_program(1);
3138  }
3139  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3140  par_dst->block_align= 0;
3141  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3142  par_dst->block_align= 0;
3143  break;
3144  case AVMEDIA_TYPE_VIDEO:
3145  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3146  sar =
3148  (AVRational){ par_dst->height, par_dst->width });
3149  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3150  "with stream copy may produce invalid files\n");
3151  }
3152  else if (ist->st->sample_aspect_ratio.num)
3153  sar = ist->st->sample_aspect_ratio;
3154  else
3155  sar = par_src->sample_aspect_ratio;
3156  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3157  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3158  ost->st->r_frame_rate = ist->st->r_frame_rate;
3159  break;
3160  }
3161 
3162  ost->mux_timebase = ist->st->time_base;
3163 
3164  return 0;
3165 }
3166 
/* Stamp the output stream's "encoder" metadata tag with the libavcodec
 * ident and encoder name, unless the user already set one or bitexact
 * output was requested (in which case only "Lavc <name>" is used).
 * NOTE(review): the signature line (presumably
 * `static void set_encoder_id(OutputFile *of, OutputStream *ost)`)
 * appears to have been lost in extraction — confirm against upstream. */
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* never clobber a user-provided encoder tag */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* evaluate -fflags so AVFMT_FLAG_BITEXACT is visible here */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* same for the codec-level "flags" option (AV_CODEC_FLAG_BITEXACT) */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    /* NOTE(review): the final argument line of this av_dict_set() call
     * (presumably AV_DICT_DONT_STRDUP_VAL) appears to have been lost in
     * extraction here — verify against upstream. */
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
}
3207 
/* parse_forced_key_frames(): turn the comma-separated -force_key_frames
 * value into a sorted array of timestamps (in avctx->time_base units),
 * stored in ost->forced_kf_pts / ost->forced_kf_count. The special token
 * "chapters[+offset]" expands to one keyframe per chapter of the output
 * file, each shifted by the optional offset.
 * NOTE(review): the doc extraction dropped src line 3240 (start of the
 * av_log(NULL, AV_LOG_FATAL, ...) call — TODO confirm). */
3208 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3209  AVCodecContext *avctx)
3210 {
3211  char *p;
3212  int n = 1, i, size, index = 0;
3213  int64_t t, *pts;
3214 
/* number of entries = number of commas + 1 */
3215  for (p = kf; *p; p++)
3216  if (*p == ',')
3217  n++;
3218  size = n;
3219  pts = av_malloc_array(size, sizeof(*pts));
3220  if (!pts) {
3221  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3222  exit_program(1);
3223  }
3224 
/* split the list in place (commas are overwritten with NUL) */
3225  p = kf;
3226  for (i = 0; i < n; i++) {
3227  char *next = strchr(p, ',');
3228 
3229  if (next)
3230  *next++ = 0;
3231 
3232  if (!memcmp(p, "chapters", 8)) {
3233 
3234  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3235  int j;
3236 
/* grow the array: this single entry expands into nb_chapters entries */
3237  if (avf->nb_chapters > INT_MAX - size ||
3238  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3239  sizeof(*pts)))) {
3241  "Could not allocate forced key frames array.\n");
3242  exit_program(1);
3243  }
/* optional offset appended after the "chapters" keyword */
3244  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3245  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3246 
3247  for (j = 0; j < avf->nb_chapters; j++) {
3248  AVChapter *c = avf->chapters[j];
3249  av_assert1(index < size);
3250  pts[index++] = av_rescale_q(c->start, c->time_base,
3251  avctx->time_base) + t;
3252  }
3253 
3254  } else {
3255 
/* plain timestamp entry */
3256  t = parse_time_or_die("force_key_frames", p, 1);
3257  av_assert1(index < size);
3258  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3259 
3260  }
3261 
3262  p = next;
3263  }
3264 
3265  av_assert0(index == size);
3266  qsort(pts, size, sizeof(*pts), compare_int64);
3267  ost->forced_kf_count = size;
3268  ost->forced_kf_pts = pts;
3269 }
3270 
3271 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3272 {
3273  InputStream *ist = get_input_stream(ost);
3274  AVCodecContext *enc_ctx = ost->enc_ctx;
3275  AVFormatContext *oc;
3276 
3277  if (ost->enc_timebase.num > 0) {
3278  enc_ctx->time_base = ost->enc_timebase;
3279  return;
3280  }
3281 
3282  if (ost->enc_timebase.num < 0) {
3283  if (ist) {
3284  enc_ctx->time_base = ist->st->time_base;
3285  return;
3286  }
3287 
3288  oc = output_files[ost->file_index]->ctx;
3289  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3290  }
3291 
3292  enc_ctx->time_base = default_time_base;
3293 }
3294 
/* init_output_stream_encode(): configure the encoder context of an output
 * stream before avcodec_open2() — frame rate selection, time base,
 * dimensions / SAR / pixel format from the buffersink, field order, and
 * forced-keyframe parsing. Returns 0 on success or a negative AVERROR.
 * NOTE(review): the doc extraction dropped several source lines (3295
 * signature, 3299, 3324-3325, 3330, 3337, 3358, 3362-3364, 3370, 3373,
 * 3375, 3385, 3400, 3411-3412, 3414, 3418-3421, 3426) — the code below is
 * byte-identical to the extraction but NOT complete. */
3296 {
3297  InputStream *ist = get_input_stream(ost);
3298  AVCodecContext *enc_ctx = ost->enc_ctx;
3300  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3301  int j, ret;
3302 
3303  set_encoder_id(output_files[ost->file_index], ost);
3304 
3305  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3306  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3307  // which have to be filtered out to prevent leaking them to output files.
3308  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3309 
3310  if (ist) {
3311  ost->st->disposition = ist->st->disposition;
3312 
3313  dec_ctx = ist->dec_ctx;
3314 
3315  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3316  } else {
/* no direct input stream: mark this stream "default" when it is the only
 * one of its media type in the output file (missing lines 3324-3325) */
3317  for (j = 0; j < oc->nb_streams; j++) {
3318  AVStream *st = oc->streams[j];
3319  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3320  break;
3321  }
3322  if (j == oc->nb_streams)
3323  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3326  }
3327 
3328  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* frame-rate fallback chain: user -r, filter output (missing 3330),
 * input -r, input stream r_frame_rate, then a hard-coded 25 fps */
3329  if (!ost->frame_rate.num)
3331  if (ist && !ost->frame_rate.num)
3332  ost->frame_rate = ist->framerate;
3333  if (ist && !ost->frame_rate.num)
3334  ost->frame_rate = ist->st->r_frame_rate;
3335  if (ist && !ost->frame_rate.num) {
3336  ost->frame_rate = (AVRational){25, 1};
3338  "No information "
3339  "about the input framerate is available. Falling "
3340  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3341  "if you want a different framerate.\n",
3342  ost->file_index, ost->index);
3343  }
3344 
/* snap to the nearest rate the encoder supports, unless -force_fps */
3345  if (ost->enc->supported_framerates && !ost->force_fps) {
3346  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3347  ost->frame_rate = ost->enc->supported_framerates[idx];
3348  }
3349  // reduce frame rate for mpeg4 to be within the spec limits
3350  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3351  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3352  ost->frame_rate.num, ost->frame_rate.den, 65535);
3353  }
3354  }
3355 
3356  switch (enc_ctx->codec_type) {
3357  case AVMEDIA_TYPE_AUDIO:
3359  if (dec_ctx)
3360  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3361  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3365 
3366  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3367  break;
3368 
3369  case AVMEDIA_TYPE_VIDEO:
3371 
3372  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3374  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3376  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3377  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3378  }
3379 
/* geometry, SAR and pixel format come from the configured filtergraph */
3380  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3381  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3382  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3383  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3384  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3386 
3387  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3388  if (dec_ctx)
3389  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3390  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3391 
3392  enc_ctx->framerate = ost->frame_rate;
3393 
3394  ost->st->avg_frame_rate = ost->frame_rate;
3395 
3396  if (!dec_ctx ||
3397  enc_ctx->width != dec_ctx->width ||
3398  enc_ctx->height != dec_ctx->height ||
3399  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3401  }
3402 
/* -top 0/1 forces bottom/top field first; -1 leaves it untouched */
3403  if (ost->top_field_first == 0) {
3404  enc_ctx->field_order = AV_FIELD_BB;
3405  } else if (ost->top_field_first == 1) {
3406  enc_ctx->field_order = AV_FIELD_TT;
3407  }
3408 
3409  if (ost->forced_keyframes) {
3410  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3413  if (ret < 0) {
3415  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3416  return ret;
3417  }
3422 
3423  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3424  // parse it only for static kf timings
3425  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3427  }
3428  }
3429  break;
3430  case AVMEDIA_TYPE_SUBTITLE:
3431  enc_ctx->time_base = AV_TIME_BASE_Q;
3432  if (!enc_ctx->width) {
3433  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3434  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3435  }
3436  break;
3437  case AVMEDIA_TYPE_DATA:
3438  break;
3439  default:
/* unknown media type is a programming error */
3440  abort();
3441  break;
3442  }
3443 
3444  ost->mux_timebase = enc_ctx->time_base;
3445 
3446  return 0;
3447 }
3448 
/* init_output_stream(): fully initialize one output stream — open the
 * encoder (or set up streamcopy), propagate side data and subtitle headers,
 * apply the user -disposition string, init bitstream filters, and finally
 * try to write the output file header once all its streams are ready.
 * Returns 0 on success or a negative AVERROR; a human-readable message is
 * written into 'error' on failure.
 * NOTE(review): the doc extraction dropped src lines 3466, 3470, 3481-3483,
 * 3498, 3500, 3524, 3526, 3532, 3534 — the code below is byte-identical to
 * the extraction but NOT complete. */
3449 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3450 {
3451  int ret = 0;
3452 
3453  if (ost->encoding_needed) {
3454  AVCodec *codec = ost->enc;
3455  AVCodecContext *dec = NULL;
3456  InputStream *ist;
3457 
3458  ret = init_output_stream_encode(ost);
3459  if (ret < 0)
3460  return ret;
3461 
3462  if ((ist = get_input_stream(ost)))
3463  dec = ist->dec_ctx;
3464  if (dec && dec->subtitle_header) {
3465  /* ASS code assumes this buffer is null terminated so add extra byte. */
3467  if (!ost->enc_ctx->subtitle_header)
3468  return AVERROR(ENOMEM);
3469  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3471  }
/* sensible defaults when the user set neither threads nor audio bitrate */
3472  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3473  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3474  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3475  !codec->defaults &&
3476  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3477  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3478  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3479 
/* hardware frames context from the filtergraph, or per-device setup */
3480  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3484  if (!ost->enc_ctx->hw_frames_ctx)
3485  return AVERROR(ENOMEM);
3486  } else {
3487  ret = hw_device_setup_for_encode(ost);
3488  if (ret < 0) {
3489  snprintf(error, error_len, "Device setup failed for "
3490  "encoder on output stream #%d:%d : %s",
3491  ost->file_index, ost->index, av_err2str(ret));
3492  return ret;
3493  }
3494  }
/* subtitle transcoding is only text->text or bitmap->bitmap */
3495  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3496  int input_props = 0, output_props = 0;
3497  AVCodecDescriptor const *input_descriptor =
3499  AVCodecDescriptor const *output_descriptor =
3501  if (input_descriptor)
3502  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3503  if (output_descriptor)
3504  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3505  if (input_props && output_props && input_props != output_props) {
3506  snprintf(error, error_len,
3507  "Subtitle encoding currently only possible from text to text "
3508  "or bitmap to bitmap");
3509  return AVERROR_INVALIDDATA;
3510  }
3511  }
3512 
3513  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3514  if (ret == AVERROR_EXPERIMENTAL)
3515  abort_codec_experimental(codec, 1);
3516  snprintf(error, error_len,
3517  "Error while opening encoder for output stream #%d:%d - "
3518  "maybe incorrect parameters such as bit_rate, rate, width or height",
3519  ost->file_index, ost->index);
3520  return ret;
3521  }
3522  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3523  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3525  ost->enc_ctx->frame_size);
/* common user mistake: bit_rate is in bits/s, not kbits/s */
3527  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3528  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3529  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3530  " It takes bits/s as argument, not kbits/s\n");
3531 
3533  if (ret < 0) {
3535  "Error initializing the output stream codec context.\n");
3536  exit_program(1);
3537  }
3538  /*
3539  * FIXME: ost->st->codec should't be needed here anymore.
3540  */
3541  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3542  if (ret < 0)
3543  return ret;
3544 
/* replicate encoder-produced coded side data onto the stream */
3545  if (ost->enc_ctx->nb_coded_side_data) {
3546  int i;
3547 
3548  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3549  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3550  uint8_t *dst_data;
3551 
3552  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3553  if (!dst_data)
3554  return AVERROR(ENOMEM);
3555  memcpy(dst_data, sd_src->data, sd_src->size);
3556  }
3557  }
3558 
3559  /*
3560  * Add global input side data. For now this is naive, and copies it
3561  * from the input stream's global side data. All side data should
3562  * really be funneled over AVFrame and libavfilter, then added back to
3563  * packet side data, and then potentially using the first packet for
3564  * global side data.
3565  */
3566  if (ist) {
3567  int i;
3568  for (i = 0; i < ist->st->nb_side_data; i++) {
3569  AVPacketSideData *sd = &ist->st->side_data[i];
3570  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3571  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3572  if (!dst)
3573  return AVERROR(ENOMEM);
3574  memcpy(dst, sd->data, sd->size);
/* autorotation happens in the filtergraph; zero out the matrix here */
3575  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3576  av_display_rotation_set((uint32_t *)dst, 0);
3577  }
3578  }
3579  }
3580 
3581  // copy timebase while removing common factors
3582  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3583  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3584 
3585  // copy estimated duration as a hint to the muxer
3586  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3587  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3588 
3589  ost->st->codec->codec= ost->enc_ctx->codec;
3590  } else if (ost->stream_copy) {
3591  ret = init_output_stream_streamcopy(ost);
3592  if (ret < 0)
3593  return ret;
3594  }
3595 
3596  // parse user provided disposition, and update stream values
3597  if (ost->disposition) {
3598  static const AVOption opts[] = {
3599  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3600  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3601  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3602  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3603  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3604  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3605  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3606  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3607  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3608  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3609  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3610  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3611  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3612  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3613  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3614  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3615  { NULL },
3616  };
/* throwaway AVClass so av_opt_eval_flags can parse the flag string */
3617  static const AVClass class = {
3618  .class_name = "",
3619  .item_name = av_default_item_name,
3620  .option = opts,
3621  .version = LIBAVUTIL_VERSION_INT,
3622  };
3623  const AVClass *pclass = &class;
3624 
3625  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3626  if (ret < 0)
3627  return ret;
3628  }
3629 
3630  /* initialize bitstream filters for the output stream
3631  * needs to be done here, because the codec id for streamcopy is not
3632  * known until now */
3633  ret = init_output_bsfs(ost);
3634  if (ret < 0)
3635  return ret;
3636 
3637  ost->initialized = 1;
3638 
3639  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3640  if (ret < 0)
3641  return ret;
3642 
3643  return ret;
3644 }
3645 
/* report_new_stream(): warn once per stream index when a packet arrives for
 * a stream that appeared after the initial probe of the input file.
 * NOTE(review): the doc extraction dropped src line 3655 (presumably the
 * media-type string argument of av_log — TODO confirm). */
3646 static void report_new_stream(int input_index, AVPacket *pkt)
3647 {
3648  InputFile *file = input_files[input_index];
3649  AVStream *st = file->ctx->streams[pkt->stream_index];
3650 
/* already warned about this (or a later) stream index */
3651  if (pkt->stream_index < file->nb_streams_warn)
3652  return;
3653  av_log(file->ctx, AV_LOG_WARNING,
3654  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3656  input_index, pkt->stream_index,
3657  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3658  file->nb_streams_warn = pkt->stream_index + 1;
3659 }
3660 
/* transcode_init(): one-time setup before the main transcode loop — wire
 * filtergraph outputs back to source streams, init framerate emulation,
 * open decoders and (non-filtered) encoders, discard unused programs, write
 * headers for stream-less outputs, and dump the stream mapping. Returns 0
 * on success or the first initialization error.
 * NOTE(review): the doc extraction dropped src line 3828 (just before the
 * final return — TODO confirm against upstream). */
3661 static int transcode_init(void)
3662 {
3663  int ret = 0, i, j, k;
3664  AVFormatContext *oc;
3665  OutputStream *ost;
3666  InputStream *ist;
3667  char error[1024] = {0};
3668 
/* for single-input complex filtergraphs, attribute the output stream to
 * the (last matching) input stream feeding the graph */
3669  for (i = 0; i < nb_filtergraphs; i++) {
3670  FilterGraph *fg = filtergraphs[i];
3671  for (j = 0; j < fg->nb_outputs; j++) {
3672  OutputFilter *ofilter = fg->outputs[j];
3673  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3674  continue;
3675  if (fg->nb_inputs != 1)
3676  continue;
3677  for (k = nb_input_streams-1; k >= 0 ; k--)
3678  if (fg->inputs[0]->ist == input_streams[k])
3679  break;
3680  ofilter->ost->source_index = k;
3681  }
3682  }
3683 
3684  /* init framerate emulation */
3685  for (i = 0; i < nb_input_files; i++) {
3686  InputFile *ifile = input_files[i];
3687  if (ifile->rate_emu)
3688  for (j = 0; j < ifile->nb_streams; j++)
3689  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3690  }
3691 
3692  /* init input streams */
3693  for (i = 0; i < nb_input_streams; i++)
3694  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoders before reporting */
3695  for (i = 0; i < nb_output_streams; i++) {
3696  ost = output_streams[i];
3697  avcodec_close(ost->enc_ctx);
3698  }
3699  goto dump_format;
3700  }
3701 
3702  /* open each encoder */
3703  for (i = 0; i < nb_output_streams; i++) {
3704  // skip streams fed from filtergraphs until we have a frame for them
3705  if (output_streams[i]->filter)
3706  continue;
3707 
3708  ret = init_output_stream(output_streams[i], error, sizeof(error));
3709  if (ret < 0)
3710  goto dump_format;
3711  }
3712 
3713  /* discard unused programs */
3714  for (i = 0; i < nb_input_files; i++) {
3715  InputFile *ifile = input_files[i];
3716  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3717  AVProgram *p = ifile->ctx->programs[j];
3718  int discard = AVDISCARD_ALL;
3719 
/* keep the program if any of its streams is in use */
3720  for (k = 0; k < p->nb_stream_indexes; k++)
3721  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3722  discard = AVDISCARD_DEFAULT;
3723  break;
3724  }
3725  p->discard = discard;
3726  }
3727  }
3728 
3729  /* write headers for files with no streams */
3730  for (i = 0; i < nb_output_files; i++) {
3731  oc = output_files[i]->ctx;
3732  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3733  ret = check_init_output_file(output_files[i], i);
3734  if (ret < 0)
3735  goto dump_format;
3736  }
3737  }
3738 
3739  dump_format:
3740  /* dump the stream mapping */
3741  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3742  for (i = 0; i < nb_input_streams; i++) {
3743  ist = input_streams[i];
3744 
3745  for (j = 0; j < ist->nb_filters; j++) {
3746  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3747  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3748  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3749  ist->filters[j]->name);
3750  if (nb_filtergraphs > 1)
3751  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3752  av_log(NULL, AV_LOG_INFO, "\n");
3753  }
3754  }
3755  }
3756 
3757  for (i = 0; i < nb_output_streams; i++) {
3758  ost = output_streams[i];
3759 
3760  if (ost->attachment_filename) {
3761  /* an attached file */
3762  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3763  ost->attachment_filename, ost->file_index, ost->index);
3764  continue;
3765  }
3766 
3767  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3768  /* output from a complex graph */
3769  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3770  if (nb_filtergraphs > 1)
3771  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3772 
3773  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3774  ost->index, ost->enc ? ost->enc->name : "?");
3775  continue;
3776  }
3777 
3778  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3779  input_streams[ost->source_index]->file_index,
3780  input_streams[ost->source_index]->st->index,
3781  ost->file_index,
3782  ost->index);
3783  if (ost->sync_ist != input_streams[ost->source_index])
3784  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3785  ost->sync_ist->file_index,
3786  ost->sync_ist->st->index);
3787  if (ost->stream_copy)
3788  av_log(NULL, AV_LOG_INFO, " (copy)");
3789  else {
3790  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3791  const AVCodec *out_codec = ost->enc;
3792  const char *decoder_name = "?";
3793  const char *in_codec_name = "?";
3794  const char *encoder_name = "?";
3795  const char *out_codec_name = "?";
3796  const AVCodecDescriptor *desc;
3797 
3798  if (in_codec) {
3799  decoder_name = in_codec->name;
3800  desc = avcodec_descriptor_get(in_codec->id);
3801  if (desc)
3802  in_codec_name = desc->name;
/* show "native" when the decoder is the default one for the codec */
3803  if (!strcmp(decoder_name, in_codec_name))
3804  decoder_name = "native";
3805  }
3806 
3807  if (out_codec) {
3808  encoder_name = out_codec->name;
3809  desc = avcodec_descriptor_get(out_codec->id);
3810  if (desc)
3811  out_codec_name = desc->name;
3812  if (!strcmp(encoder_name, out_codec_name))
3813  encoder_name = "native";
3814  }
3815 
3816  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3817  in_codec_name, decoder_name,
3818  out_codec_name, encoder_name);
3819  }
3820  av_log(NULL, AV_LOG_INFO, "\n");
3821  }
3822 
3823  if (ret) {
3824  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3825  return ret;
3826  }
3827 
3829 
3830  return 0;
3831 }
3832 
3833 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3834 static int need_output(void)
3835 {
3836  int i;
3837 
3838  for (i = 0; i < nb_output_streams; i++) {
3839  OutputStream *ost = output_streams[i];
3840  OutputFile *of = output_files[ost->file_index];
3841  AVFormatContext *os = output_files[ost->file_index]->ctx;
3842 
3843  if (ost->finished ||
3844  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3845  continue;
3846  if (ost->frame_number >= ost->max_frames) {
3847  int j;
3848  for (j = 0; j < of->ctx->nb_streams; j++)
3849  close_output_stream(output_streams[of->ost_index + j]);
3850  continue;
3851  }
3852 
3853  return 1;
3854  }
3855 
3856  return 0;
3857 }
3858 
3859 /**
3860  * Select the output stream to process.
3861  *
3862  * @return selected output stream, or NULL if none available
3863  */
3865 {
3866  int i;
3867  int64_t opts_min = INT64_MAX;
3868  OutputStream *ost_min = NULL;
3869 
3870  for (i = 0; i < nb_output_streams; i++) {
3871  OutputStream *ost = output_streams[i];
3872  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3873  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3874  AV_TIME_BASE_Q);
3875  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3877  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3878  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3879 
3880  if (!ost->initialized && !ost->inputs_done)
3881  return ost;
3882 
3883  if (!ost->finished && opts < opts_min) {
3884  opts_min = opts;
3885  ost_min = ost->unavailable ? NULL : ost;
3886  }
3887  }
3888  return ost_min;
3889 }
3890 
/* Turn terminal echo on stdin on or off; a no-op where termios is absent. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term);
#endif
}
3902 
/* check_keyboard_interaction(): poll the keyboard at most every 100 ms and
 * act on interactive commands — q(uit), +/- verbosity, s QP histogram,
 * h hex/packet dump cycling, c/C filter commands, d/D debug, ? help.
 * Returns AVERROR_EXIT to stop the transcode loop, 0 otherwise.
 * NOTE(review): the doc extraction dropped src lines 3927 and 3964 (both
 * av_log call starts — TODO confirm). */
3903 static int check_keyboard_interaction(int64_t cur_time)
3904 {
3905  int i, ret, key;
3906  static int64_t last_time;
3907  if (received_nb_signals)
3908  return AVERROR_EXIT;
3909  /* read_key() returns 0 on EOF */
3910  if(cur_time - last_time >= 100000 && !run_as_daemon){
3911  key = read_key();
3912  last_time = cur_time;
3913  }else
3914  key = -1;
3915  if (key == 'q')
3916  return AVERROR_EXIT;
3917  if (key == '+') av_log_set_level(av_log_get_level()+10);
3918  if (key == '-') av_log_set_level(av_log_get_level()-10);
3919  if (key == 's') qp_hist ^= 1;
3920  if (key == 'h'){
/* cycle: none -> packet dump -> packet+hex dump -> none */
3921  if (do_hex_dump){
3922  do_hex_dump = do_pkt_dump = 0;
3923  } else if(do_pkt_dump){
3924  do_hex_dump = 1;
3925  } else
3926  do_pkt_dump = 1;
3928  }
3929  if (key == 'c' || key == 'C'){
3930  char buf[4096], target[64], command[256], arg[256] = {0};
3931  double time;
3932  int k, n = 0;
3933  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3934  i = 0;
3935  set_tty_echo(1);
/* read one echoed line from the terminal */
3936  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3937  if (k > 0)
3938  buf[i++] = k;
3939  buf[i] = 0;
3940  set_tty_echo(0);
3941  fprintf(stderr, "\n");
3942  if (k > 0 &&
3943  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3944  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3945  target, time, command, arg);
3946  for (i = 0; i < nb_filtergraphs; i++) {
3947  FilterGraph *fg = filtergraphs[i];
3948  if (fg->graph) {
3949  if (time < 0) {
/* negative time: send immediately ('c' stops at first match) */
3950  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3951  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3952  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3953  } else if (key == 'c') {
3954  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3955  ret = AVERROR_PATCHWELCOME;
3956  } else {
3957  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3958  if (ret < 0)
3959  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3960  }
3961  }
3962  }
3963  } else {
3965  "Parse error, at least 3 arguments were expected, "
3966  "only %d given in string '%s'\n", n, buf);
3967  }
3968  }
3969  if (key == 'd' || key == 'D'){
3970  int debug=0;
/* 'D' cycles through debug bit patterns; 'd' prompts for a value */
3971  if(key == 'D') {
3972  debug = input_streams[0]->st->codec->debug<<1;
3973  if(!debug) debug = 1;
3974  while(debug & (FF_DEBUG_DCT_COEFF
3975 #if FF_API_DEBUG_MV
3976  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3977 #endif
3978  )) //unsupported, would just crash
3979  debug += debug;
3980  }else{
3981  char buf[32];
3982  int k = 0;
3983  i = 0;
3984  set_tty_echo(1);
3985  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3986  if (k > 0)
3987  buf[i++] = k;
3988  buf[i] = 0;
3989  set_tty_echo(0);
3990  fprintf(stderr, "\n");
3991  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3992  fprintf(stderr,"error parsing debug value\n");
3993  }
/* apply the new debug level to every decoder and encoder */
3994  for(i=0;i<nb_input_streams;i++) {
3995  input_streams[i]->st->codec->debug = debug;
3996  }
3997  for(i=0;i<nb_output_streams;i++) {
3998  OutputStream *ost = output_streams[i];
3999  ost->enc_ctx->debug = debug;
4000  }
4001  if(debug) av_log_set_level(AV_LOG_DEBUG);
4002  fprintf(stderr,"debug=%d\n", debug);
4003  }
4004  if (key == '?'){
4005  fprintf(stderr, "key function\n"
4006  "? show this help\n"
4007  "+ increase verbosity\n"
4008  "- decrease verbosity\n"
4009  "c Send command to first matching filter supporting it\n"
4010  "C Send/Queue command to all matching filters\n"
4011  "D cycle through available debug modes\n"
4012  "h dump packets/hex press to cycle through the 3 states\n"
4013  "q quit\n"
4014  "s Show QP histogram\n"
4015  );
4016  }
4017  return 0;
4018 }
4019 
4020 #if HAVE_THREADS
4021 static void *input_thread(void *arg)
4022 {
4023  InputFile *f = arg;
4024  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4025  int ret = 0;
4026 
4027  while (1) {
4028  AVPacket pkt;
4029  ret = av_read_frame(f->ctx, &pkt);
4030 
4031  if (ret == AVERROR(EAGAIN)) {
4032  av_usleep(10000);
4033  continue;
4034  }
4035  if (ret < 0) {
4036  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4037  break;
4038  }
4039  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4040  if (flags && ret == AVERROR(EAGAIN)) {
4041  flags = 0;
4042  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4044  "Thread message queue blocking; consider raising the "
4045  "thread_queue_size option (current value: %d)\n",
4046  f->thread_queue_size);
4047  }
4048  if (ret < 0) {
4049  if (ret != AVERROR_EOF)
4050  av_log(f->ctx, AV_LOG_ERROR,
4051  "Unable to send packet to main thread: %s\n",
4052  av_err2str(ret));
4053  av_packet_unref(&pkt);
4054  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4055  break;
4056  }
4057  }
4058 
4059  return NULL;
4060 }
4061 
/* free_input_thread(): shut down the reader thread of input file i —
 * drain any queued packets, join the thread and free the queue.
 * NOTE(review): the doc extraction dropped src line 4069 (presumably the
 * av_thread_message_queue_set_err_send() call that unblocks the sender —
 * TODO confirm). */
4062 static void free_input_thread(int i)
4063 {
4064  InputFile *f = input_files[i];
4065  AVPacket pkt;
4066 
/* nothing to do if threading was never set up for this file */
4067  if (!f || !f->in_thread_queue)
4068  return;
/* drain queued packets so the thread can observe the error and exit */
4070  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4071  av_packet_unref(&pkt);
4072 
4073  pthread_join(f->thread, NULL);
4074  f->joined = 1;
4075  av_thread_message_queue_free(&f->in_thread_queue);
4076 }
4077 
4078 static void free_input_threads(void)
4079 {
4080  int i;
4081 
4082  for (i = 0; i < nb_input_files; i++)
4083  free_input_thread(i);
4084 }
4085 
4086 static int init_input_thread(int i)
4087 {
4088  int ret;
4089  InputFile *f = input_files[i];
4090 
4091  if (nb_input_files == 1)
4092  return 0;
4093 
4094  if (f->ctx->pb ? !f->ctx->pb->seekable :
4095  strcmp(f->ctx->iformat->name, "lavfi"))
4096  f->non_blocking = 1;
4097  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4098  f->thread_queue_size, sizeof(AVPacket));
4099  if (ret < 0)
4100  return ret;
4101 
4102  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4103  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4104  av_thread_message_queue_free(&f->in_thread_queue);
4105  return AVERROR(ret);
4106  }
4107 
4108  return 0;
4109 }
4110 
4111 static int init_input_threads(void)
4112 {
4113  int i, ret;
4114 
4115  for (i = 0; i < nb_input_files; i++) {
4116  ret = init_input_thread(i);
4117  if (ret < 0)
4118  return ret;
4119  }
4120  return 0;
4121 }
4122 
/* get_input_packet_mt(): fetch the next packet from the reader thread's
 * message queue, non-blocking when the file was flagged as such.
 * NOTE(review): the doc extraction dropped src line 4127 (the
 * AV_THREAD_MESSAGE_NONBLOCK : 0 arm of the ternary — TODO confirm). */
4123 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4124 {
4125  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4126  f->non_blocking ?
4128 }
4130 
/* get_input_packet(): read the next packet from an input file, honouring
 * -re rate emulation (returns EAGAIN while the stream is ahead of the
 * wall clock) and routing through the per-file reader thread when more
 * than one input file is open.
 * NOTE(review): the doc extraction dropped the signature (src line 4131,
 * static int get_input_packet(InputFile *f, AVPacket *pkt) — TODO confirm
 * from parameter usage below). */
4132 {
4133  if (f->rate_emu) {
4134  int i;
4135  for (i = 0; i < f->nb_streams; i++) {
4136  InputStream *ist = input_streams[f->ist_index + i];
/* compare the stream DTS (scaled to microseconds) with elapsed time */
4137  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4138  int64_t now = av_gettime_relative() - ist->start;
4139  if (pts > now)
4140  return AVERROR(EAGAIN);
4141  }
4142  }
4143 
4144 #if HAVE_THREADS
4145  if (nb_input_files > 1)
4146  return get_input_packet_mt(f, pkt);
4147 #endif
4148  return av_read_frame(f->ctx, pkt);
4149 }
4150 
4151 static int got_eagain(void)
4152 {
4153  int i;
4154  for (i = 0; i < nb_output_streams; i++)
4155  if (output_streams[i]->unavailable)
4156  return 1;
4157  return 0;
4158 }
4159 
4160 static void reset_eagain(void)
4161 {
4162  int i;
4163  for (i = 0; i < nb_input_files; i++)
4164  input_files[i]->eagain = 0;
4165  for (i = 0; i < nb_output_streams; i++)
4166  output_streams[i]->unavailable = 0;
4167 }
4168 
4169 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4170 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4171  AVRational time_base)
4172 {
4173  int ret;
4174 
4175  if (!*duration) {
4176  *duration = tmp;
4177  return tmp_time_base;
4178  }
4179 
4180  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4181  if (ret < 0) {
4182  *duration = tmp;
4183  return tmp_time_base;
4184  }
4185 
4186  return time_base;
4187 }
4188 
/*
 * Rewind an input file for -stream_loop and record how long one pass of
 * the file was, so the next iteration's timestamps can be shifted.
 *
 * NOTE(review): the signature line is missing from this copy of the
 * file; from its caller this is the body of
 * seek_to_start(InputFile *ifile, AVFormatContext *is). Confirm upstream.
 */
{
 InputStream *ist;
 AVCodecContext *avctx;
 int i, ret, has_audio = 0;
 int64_t duration = 0;

 /* Seek back to the container's start time. */
 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
 if (ret < 0)
 return ret;

 for (i = 0; i < ifile->nb_streams; i++) {
 ist = input_streams[ifile->ist_index + i];
 avctx = ist->dec_ctx;

 /* duration is the length of the last frame in a stream
  * when audio stream is present we don't care about
  * last video frame length because it's not defined exactly */
 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
 has_audio = 1;
 }

 for (i = 0; i < ifile->nb_streams; i++) {
 ist = input_streams[ifile->ist_index + i];
 avctx = ist->dec_ctx;

 if (has_audio) {
 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
 AVRational sample_rate = {1, avctx->sample_rate};

 /* Length of the last audio frame, converted to the
  * stream's own time base. */
 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
 } else {
 continue;
 }
 } else {
 if (ist->framerate.num) {
 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
 } else if (ist->st->avg_frame_rate.num) {
 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
 } else {
 /* No frame rate known; fall back to one time-base unit. */
 duration = 1;
 }
 }
 if (!ifile->duration)
 ifile->time_base = ist->st->time_base;
 /* the total duration of the stream, max_pts - min_pts is
  * the duration of the stream without the last frame */
 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
 duration += ist->max_pts - ist->min_pts;
 /* Keep the longest per-stream duration as the file's duration
  * (time base tracked alongside it). */
 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
 ifile->time_base);
 }

 /* Count down the remaining loop iterations (negative means forever). */
 if (ifile->loop > 0)
 ifile->loop--;

 return ret;
}
4247 
4248 /*
4249  * Return
4250  * - 0 -- one packet was read and processed
4251  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4252  * this function should be called again
4253  * - AVERROR_EOF -- this function should not be called again
4254  */
/*
 * Read and process one packet from the given input file: loop/EOF
 * handling, timestamp offsetting/scaling, wrap and discontinuity
 * correction, then hand-off to process_input_packet().
 *
 * NOTE(review): several source lines are missing from this copy of the
 * file (a local declaration and the opening lines of a few av_log /
 * av_pkt_dump calls and conditions); the truncation points are marked
 * below. Verify against the upstream source before editing this code.
 */
static int process_input(int file_index)
{
 InputFile *ifile = input_files[file_index];
 /* NOTE(review): the declaration of the demuxer context variable "is"
  * appears to be missing here in this copy -- it is assigned from
  * ifile->ctx just below. */
 InputStream *ist;
 AVPacket pkt;
 int ret, thread_ret, i, j;
 int64_t duration;
 int64_t pkt_dts;

 is = ifile->ctx;
 /* Pull the next demuxed packet (direct read or input-thread queue). */
 ret = get_input_packet(ifile, &pkt);

 if (ret == AVERROR(EAGAIN)) {
 /* Nothing available right now; flag it so the scheduler can try
  * another input file. */
 ifile->eagain = 1;
 return ret;
 }
 if (ret < 0 && ifile->loop) {
 /* -stream_loop: flush decoders, rewind the file, then retry. */
 AVCodecContext *avctx;
 for (i = 0; i < ifile->nb_streams; i++) {
 ist = input_streams[ifile->ist_index + i];
 avctx = ist->dec_ctx;
 if (ist->decoding_needed) {
 ret = process_input_packet(ist, NULL, 1);
 if (ret>0)
 return 0;
 avcodec_flush_buffers(avctx);
 }
 }
#if HAVE_THREADS
 /* The input thread owns the demuxer; stop it before seeking. */
 free_input_thread(file_index);
#endif
 ret = seek_to_start(ifile, is);
#if HAVE_THREADS
 thread_ret = init_input_thread(file_index);
 if (thread_ret < 0)
 return thread_ret;
#endif
 if (ret < 0)
 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
 else
 ret = get_input_packet(ifile, &pkt);
 if (ret == AVERROR(EAGAIN)) {
 ifile->eagain = 1;
 return ret;
 }
 }
 if (ret < 0) {
 /* Real error or EOF: report, flush decoders, finish dependent
  * output streams. */
 if (ret != AVERROR_EOF) {
 print_error(is->url, ret);
 if (exit_on_error)
 exit_program(1);
 }

 for (i = 0; i < ifile->nb_streams; i++) {
 ist = input_streams[ifile->ist_index + i];
 if (ist->decoding_needed) {
 ret = process_input_packet(ist, NULL, 0);
 if (ret>0)
 return 0;
 }

 /* mark all outputs that don't go through lavfi as finished */
 for (j = 0; j < nb_output_streams; j++) {
 OutputStream *ost = output_streams[j];

 if (ost->source_index == ifile->ist_index + i &&
 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
 finish_output_stream(ost);
 }
 }

 ifile->eof_reached = 1;
 /* Returned as EAGAIN (not EOF) so the caller keeps draining the
  * remaining inputs. */
 return AVERROR(EAGAIN);
 }

 reset_eagain();

 if (do_pkt_dump) {
 /* NOTE(review): the opening of the packet-dump call is truncated in
  * this copy; only its final argument is visible below. */
 is->streams[pkt.stream_index]);
 }
 /* the following test is needed in case new streams appear
  dynamically in stream : we ignore them */
 if (pkt.stream_index >= ifile->nb_streams) {
 report_new_stream(file_index, &pkt);
 goto discard_packet;
 }

 ist = input_streams[ifile->ist_index + pkt.stream_index];

 /* Per-stream statistics. */
 ist->data_size += pkt.size;
 ist->nb_packets++;

 if (ist->discard)
 goto discard_packet;

 if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
 /* NOTE(review): the av_log call opening this message is truncated
  * in this copy. */
 "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
 if (exit_on_error)
 exit_program(1);
 }

 if (debug_ts) {
 /* NOTE(review): some arguments of this av_log call appear to be
  * missing in this copy. */
 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
 av_ts2str(input_files[ist->file_index]->ts_offset),
 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
 }

 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
 int64_t stime, stime2;
 // Correcting starttime based on the enabled streams
 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
 // so we instead do it here as part of discontinuity handling
 if ( ist->next_dts == AV_NOPTS_VALUE
 && ifile->ts_offset == -is->start_time
 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
 int64_t new_start_time = INT64_MAX;
 for (i=0; i<is->nb_streams; i++) {
 AVStream *st = is->streams[i];
 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
 continue;
 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
 }
 if (new_start_time > is->start_time) {
 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
 ifile->ts_offset = -new_start_time;
 }
 }

 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
 ist->wrap_correction_done = 1;

 /* Undo a pts_wrap_bits wraparound when a timestamp lands in the
  * upper half of the wrap range; a corrected packet keeps wrap
  * correction armed for subsequent packets. */
 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
 ist->wrap_correction_done = 0;
 }
 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
 ist->wrap_correction_done = 0;
 }
 }

 /* add the stream-global side data to the first packet */
 if (ist->nb_packets == 1) {
 for (i = 0; i < ist->st->nb_side_data; i++) {
 AVPacketSideData *src_sd = &ist->st->side_data[i];
 uint8_t *dst_data;

 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
 continue;

 /* Don't overwrite side data the packet already carries. */
 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
 continue;

 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
 if (!dst_data)
 exit_program(1);

 memcpy(dst_data, src_sd->data, src_sd->size);
 }
 }

 /* Apply the input file's timestamp offset, converted into this
  * stream's time base. */
 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

 /* Per-stream timestamp scaling (-itsscale). */
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts *= ist->ts_scale;
 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts *= ist->ts_scale;

 /* NOTE(review): the assignment of pkt_dts and part of this condition
  * (the audio-type alternative) appear truncated in this copy. */
 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
 int64_t delta = pkt_dts - ifile->last_ts;
 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
 /* Inter-stream discontinuity: fold the jump into the file's
  * global offset and correct this packet in place. */
 ifile->ts_offset -= delta;
 /* NOTE(review): the av_log call opening this message is
  * truncated in this copy. */
 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
 delta, ifile->ts_offset);
 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 }
 }

 /* -stream_loop: shift timestamps by the accumulated duration of the
  * previous passes over this file, and track the pts range. */
 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE) {
 pkt.pts += duration;
 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
 }

 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts += duration;

 /* NOTE(review): as above, the pkt_dts refresh and part of this
  * condition appear truncated in this copy. */
 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
 !copy_ts) {
 int64_t delta = pkt_dts - ist->next_dts;
 if (is->iformat->flags & AVFMT_TS_DISCONT) {
 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
 /* Per-stream discontinuity on a format that allows them:
  * absorb the jump into the file offset. */
 ifile->ts_offset -= delta;
 /* NOTE(review): the av_log call opening this message and one
  * argument line appear truncated in this copy. */
 "timestamp discontinuity for stream #%d:%d "
 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
 ist->file_index, ist->st->index, ist->st->id,
 delta, ifile->ts_offset);
 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 if (pkt.pts != AV_NOPTS_VALUE)
 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 }
 } else {
 /* Format without discontinuities: drop timestamps that are
  * too far from the expected next dts instead of shifting. */
 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
 pkt.dts = AV_NOPTS_VALUE;
 }
 if (pkt.pts != AV_NOPTS_VALUE){
 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
 delta = pkt_pts - ist->next_dts;
 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
 pkt.pts = AV_NOPTS_VALUE;
 }
 }
 }
 }

 /* Remember the last dts for the inter-stream discontinuity check. */
 if (pkt.dts != AV_NOPTS_VALUE)
 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

 if (debug_ts) {
 /* NOTE(review): one argument line of this av_log call appears to be
  * missing in this copy. */
 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
 av_ts2str(input_files[ist->file_index]->ts_offset),
 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
 }

 sub2video_heartbeat(ist, pkt.pts);

 process_input_packet(ist, &pkt, 0);

discard_packet:
 av_packet_unref(&pkt);

 return 0;
}
4524 
4525 /**
4526  * Perform a step of transcoding for the specified filter graph.
4527  *
4528  * @param[in] graph filter graph to consider
4529  * @param[out] best_ist input stream where a frame would allow to continue
4530  * @return 0 for success, <0 for error
4531  */
4532 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4533 {
4534  int i, ret;
4535  int nb_requests, nb_requests_max = 0;
4536  InputFilter *ifilter;
4537  InputStream *ist;
4538 
4539  *best_ist = NULL;
4540  ret = avfilter_graph_request_oldest(graph->graph);
4541  if (ret >= 0)
4542  return reap_filters(0);
4543 
4544  if (ret == AVERROR_EOF) {
4545  ret = reap_filters(1);
4546  for (i = 0; i < graph->nb_outputs; i++)
4547  close_output_stream(graph->outputs[i]->ost);
4548  return ret;
4549  }
4550  if (ret != AVERROR(EAGAIN))
4551  return ret;
4552 
4553  for (i = 0; i < graph->nb_inputs; i++) {
4554  ifilter = graph->inputs[i];
4555  ist = ifilter->ist;
4556  if (input_files[ist->file_index]->eagain ||
4557  input_files[ist->file_index]->eof_reached)
4558  continue;
4559  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->