FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity reported by cmdutils (banner, version output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); NULL until opened. */
static FILE *vstats_file;

/* Names of the variables available in '-force_key_frames expr:...'
   expressions; order matters to the expression evaluator, NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Global transcoding state / statistics. */
static int run_as_daemon  = 0;      /* set when stdin is not a terminal; disables key handling */
static int nb_frames_dup = 0;       /* total frames duplicated for CFR output */
static unsigned dup_warning = 1000; /* threshold for the "More than N frames duplicated" warning */
static int nb_frames_drop = 0;      /* total frames dropped in rate conversion */
static int64_t decode_error_stat[2]; /* [0]=decoded ok, [1]=decode errors (for -xerror accounting) */

static int want_sdp = 1;            /* print an SDP once when an RTP muxer is used */
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* Reallocate and zero the sub2video canvas frame for 'ist'.
   Returns 0 on success, a negative AVERROR code on allocation failure.
   NOTE(review): the function signature line (sub2video_get_blank_frame) and
   the pixel-format assignment were lost in extraction — restore from upstream. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer the decoder's dimensions; fall back to the probed sub2video size. */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* Clear to fully transparent. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Feed the current sub2video canvas frame, stamped with 'pts', into every
   filtergraph input attached to this stream, and remember the pts so
   heartbeats are not re-sent for older timestamps. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* NOTE(review): the flag arguments of this call
           (AV_BUFFERSRC_FLAG_KEEP_REF | AV_BUFFERSRC_FLAG_PUSH) were lost in
           extraction — the call is truncated as shown. */
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  /* If we are initializing the system, utilize current heartbeat
258  PTS as the start time, and show until the following subpicture
259  is received. Otherwise, utilize the previous subpicture's end time
260  as the fall-back value. */
261  pts = ist->sub2video.initialize ?
262  heartbeat_pts : ist->sub2video.end_pts;
263  end_pts = INT64_MAX;
264  num_rects = 0;
265  }
266  if (sub2video_get_blank_frame(ist) < 0) {
268  "Impossible to get a blank canvas.\n");
269  return;
270  }
271  dst = frame->data [0];
272  dst_linesize = frame->linesize[0];
273  for (i = 0; i < num_rects; i++)
274  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275  sub2video_push_ref(ist, pts);
276  ist->sub2video.end_pts = end_pts;
277  ist->sub2video.initialize = 0;
278 }
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Restore the saved terminal attributes. Uses only async-signal-safe calls
   so it may run from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
335 
336 void term_exit(void)
337 {
338  av_log(NULL, AV_LOG_QUIET, "%s", "");
340 }
341 
/* Signal/exit bookkeeping, written from signal handlers (hence volatile). */
static volatile int received_sigterm = 0;    /* number of the last termination signal, 0 if none */
static volatile int received_nb_signals = 0; /* count of termination signals received so far */
static volatile int ffmpeg_exited = 0;       /* set once cleanup finished; polled by CtrlHandler */
static int main_return_code = 0;             /* process exit code, set on mux/write errors */
347 
/* Termination-signal handler: record the signal and, after more than three
   signals, write a message straight to stderr and hard-exit.
   NOTE(review): the parameter list line "sigterm_handler(int sig)" and the
   lines incrementing received_nb_signals / calling term_exit_sigsafe() were
   lost in extraction — restore from upstream. */
static void
{
    int ret;
    received_sigterm = sig;
    if (received_nb_signals > 3) {
        /* write(2) is async-signal-safe, unlike stdio. */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
362 
#if HAVE_SETCONSOLECTRLHANDLER
/* Map Windows console control events onto the POSIX-style signal handler. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
394 
/* Put the controlling terminal into raw-ish mode so single keypresses can be
   read without echo, and install the termination-signal handlers.
   NOTE(review): an enclosing "if (!run_as_daemon) {" line was lost in
   extraction — that is why an apparently unmatched '}' follows the
   signal(SIGQUIT, ...) call below. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        /* Save current settings so term_exit() can restore them. */
        oldtty = tty;
        restore_tty = 1;

        /* Disable input translation/flow control, canonical mode and echo;
           keep output post-processing; force 8-bit chars, no parity. */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                         |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        /* Block until at least 1 byte is available, no inter-byte timeout. */
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
431 
432 /* read a key without blocking */
433 static int read_key(void)
434 {
435  unsigned char ch;
436 #if HAVE_TERMIOS_H
437  int n = 1;
438  struct timeval tv;
439  fd_set rfds;
440 
441  FD_ZERO(&rfds);
442  FD_SET(0, &rfds);
443  tv.tv_sec = 0;
444  tv.tv_usec = 0;
445  n = select(1, &rfds, NULL, NULL, &tv);
446  if (n > 0) {
447  n = read(0, &ch, 1);
448  if (n == 1)
449  return ch;
450 
451  return n;
452  }
453 #elif HAVE_KBHIT
454 # if HAVE_PEEKNAMEDPIPE
455  static int is_pipe;
456  static HANDLE input_handle;
457  DWORD dw, nchars;
458  if(!input_handle){
459  input_handle = GetStdHandle(STD_INPUT_HANDLE);
460  is_pipe = !GetConsoleMode(input_handle, &dw);
461  }
462 
463  if (is_pipe) {
464  /* When running under a GUI, you will end here. */
465  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466  // input pipe may have been closed by the program that ran ffmpeg
467  return -1;
468  }
469  //Read it
470  if(nchars != 0) {
471  read(0, &ch, 1);
472  return ch;
473  }else{
474  return -1;
475  }
476  }
477 # endif
478  if(kbhit())
479  return(getch());
480 #endif
481  return -1;
482 }
483 
484 static int decode_interrupt_cb(void *ctx)
485 {
487 }
488 
490 
/* Global teardown, registered via register_exit(): frees filtergraphs, output
   files/streams, input files/streams, closes the vstats file and reports the
   final status. 'ret' is the pending process exit code.
   NOTE(review): several statements were lost in extraction (marked below);
   restore from upstream before compiling. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free all filtergraphs, draining queued frames and subtitles first. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): the av_fifo_generic_read(...) call head is
                       missing here — only its argument tail survived. */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (!of)
            continue;
        /* NOTE(review): the declaration "AVFormatContext *s;" was lost in
           extraction — 's' is used below without a visible declaration. */
        s = of->ctx;
        /* Only close the AVIO context for formats that actually own a file. */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        /* Drain any packets still buffered for a muxer that never
           finished initializing. */
        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, call head is
               missing here — only the message arguments survived. */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
647 
/* Remove from dictionary 'a' every key that also appears in 'b'.
   NOTE(review): the signature line "void remove_avoptions(AVDictionary **a,
   AVDictionary *b)" and the loop body's av_dict_set(a, t->key, NULL,
   AV_DICT_MATCH_CASE) call were lost in extraction. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
656 
/* Abort with a fatal error if dictionary 'm' still contains any entry,
   i.e. an option the user supplied was not consumed by any component.
   NOTE(review): the signature line "void assert_avoptions(AVDictionary *m)"
   and the declaration "AVDictionaryEntry *t;" were lost in extraction. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
665 
/* Terminate the program after an experimental codec was selected without
   -strict experimental; 'c' and 'encoder' identify the offending codec
   (unused here — the caller logs the details before calling). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
670 
/* With -benchmark_all, log the user/sys/real time consumed since the last
   call, labelled by the printf-style description in 'fmt' (may be NULL to
   just reset the reference point).
   NOTE(review): the line fetching the current BenchmarkTimeStamps into 't'
   and the av_log(NULL, AV_LOG_INFO, call head were lost in extraction. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.user_usec - current_time.user_usec,
            t.sys_usec - current_time.sys_usec,
            t.real_usec - current_time.real_usec, buf);
        }
        /* Make the next call measure from now. */
        current_time = t;
    }
}
691 
/* Mark 'ost' finished with 'this_stream' and every other output stream with
   'others' (OSTFinished bit flags).
   NOTE(review): the signature line "static void close_all_output_streams(
   OutputStream *ost, OSTFinished this_stream, OSTFinished others)" was lost
   in extraction. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
700 
/* Hand one packet to the muxer for 'ost': counts frames against -frames,
   buffers packets while the muxer header is unwritten, sanitizes timestamps,
   then interleaves/writes. 'unqueue' is set when re-emitting a packet that
   was previously buffered (so it is not double-counted).
   NOTE(review): several lines were lost in extraction (marked below);
   restore from upstream before compiling. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue geometrically up to -max_muxing_queue_size. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, call head is
                   missing here. */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* NOTE(review): the guarding condition (VSYNC_DROP video / negative
       audio_sync_method audio) for this timestamp reset was lost in
       extraction — as written it runs unconditionally. */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the declaration/call "uint8_t *sd =
           av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS," is
           missing — only its trailing argument survived. */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* Replace pts/dts by the median of the three candidates. */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the opening "if ((st->codecpar->codec_type ==
           AVMEDIA_TYPE_AUDIO || ... VIDEO) &&" line is missing here. */
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Strict formats require strictly increasing DTS. */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
    }
    av_packet_unref(pkt);
}
837 
839 {
840  OutputFile *of = output_files[ost->file_index];
841 
842  ost->finished |= ENCODER_FINISHED;
843  if (of->shortest) {
844  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
845  of->recording_time = FFMIN(of->recording_time, end);
846  }
847 }
848 
/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
/* NOTE(review): the first signature line "static void output_packet(
   OutputFile *of, AVPacket *pkt," was lost in extraction. */
    OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters */
    if (ost->bsf_ctx) {
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0)
            goto finish;
        /* Drain everything the filter has ready; EAGAIN just means "feed more". */
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
            write_packet(of, pkt, ost, 0);
        if (ret == AVERROR(EAGAIN))
            ret = 0;
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
885 
887 {
888  OutputFile *of = output_files[ost->file_index];
889 
890  if (of->recording_time != INT64_MAX &&
892  AV_TIME_BASE_Q) >= 0) {
893  close_output_stream(ost);
894  return 0;
895  }
896  return 1;
897 }
898 
/* Encode one audio frame for 'ost' and write all resulting packets.
   Aborts the program on encoder errors.
   NOTE(review): the first signature line "static void do_audio_out(
   OutputFile *of, OutputStream *ost," was lost in extraction. */
    AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* Stop feeding the encoder once -t has been reached. */
    if (!check_recording_time(ost))
        return;

    /* With -async < 0 (or no pts), force the frame onto the running sample
       clock; advance the clock by this frame's sample count either way. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain every packet the encoder produced for this frame. */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
958 
/* Encode one AVSubtitle for 'ost' and emit the resulting packet(s).
   DVB subtitles are encoded twice: once to draw and once to clear.
   Aborts the program on fatal errors. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Lazily allocate the shared 1 MiB encode buffer. */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* NOTE(review): the condition line "if (enc->codec_id ==
       AV_CODEC_ID_DVB_SUBTITLE)" was lost in extraction — as written,
       nb = 2 is unconditional. */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;
        /* Second pass (DVB clear packet) encodes zero rectangles. */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1041 
1042 static void do_video_out(OutputFile *of,
1043  OutputStream *ost,
1044  AVFrame *next_picture,
1045  double sync_ipts)
1046 {
1047  int ret, format_video_sync;
1048  AVPacket pkt;
1049  AVCodecContext *enc = ost->enc_ctx;
1050  AVCodecParameters *mux_par = ost->st->codecpar;
1051  AVRational frame_rate;
1052  int nb_frames, nb0_frames, i;
1053  double delta, delta0;
1054  double duration = 0;
1055  int frame_size = 0;
1056  InputStream *ist = NULL;
1058 
1059  if (ost->source_index >= 0)
1060  ist = input_streams[ost->source_index];
1061 
1062  frame_rate = av_buffersink_get_frame_rate(filter);
1063  if (frame_rate.num > 0 && frame_rate.den > 0)
1064  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1065 
1066  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1067  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1068 
1069  if (!ost->filters_script &&
1070  !ost->filters &&
1071  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1072  next_picture &&
1073  ist &&
1074  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1075  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1076  }
1077 
1078  if (!next_picture) {
1079  //end, flushing
1080  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1081  ost->last_nb0_frames[1],
1082  ost->last_nb0_frames[2]);
1083  } else {
1084  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1085  delta = delta0 + duration;
1086 
1087  /* by default, we output a single frame */
1088  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1089  nb_frames = 1;
1090 
1091  format_video_sync = video_sync_method;
1092  if (format_video_sync == VSYNC_AUTO) {
1093  if(!strcmp(of->ctx->oformat->name, "avi")) {
1094  format_video_sync = VSYNC_VFR;
1095  } else
1096  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1097  if ( ist
1098  && format_video_sync == VSYNC_CFR
1099  && input_files[ist->file_index]->ctx->nb_streams == 1
1100  && input_files[ist->file_index]->input_ts_offset == 0) {
1101  format_video_sync = VSYNC_VSCFR;
1102  }
1103  if (format_video_sync == VSYNC_CFR && copy_ts) {
1104  format_video_sync = VSYNC_VSCFR;
1105  }
1106  }
1107  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1108 
1109  if (delta0 < 0 &&
1110  delta > 0 &&
1111  format_video_sync != VSYNC_PASSTHROUGH &&
1112  format_video_sync != VSYNC_DROP) {
1113  if (delta0 < -0.6) {
1114  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1115  } else
1116  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1117  sync_ipts = ost->sync_opts;
1118  duration += delta0;
1119  delta0 = 0;
1120  }
1121 
1122  switch (format_video_sync) {
1123  case VSYNC_VSCFR:
1124  if (ost->frame_number == 0 && delta0 >= 0.5) {
1125  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1126  delta = duration;
1127  delta0 = 0;
1128  ost->sync_opts = llrint(sync_ipts);
1129  }
1130  case VSYNC_CFR:
1131  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1132  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1133  nb_frames = 0;
1134  } else if (delta < -1.1)
1135  nb_frames = 0;
1136  else if (delta > 1.1) {
1137  nb_frames = lrintf(delta);
1138  if (delta0 > 1.1)
1139  nb0_frames = llrintf(delta0 - 0.6);
1140  }
1141  break;
1142  case VSYNC_VFR:
1143  if (delta <= -0.6)
1144  nb_frames = 0;
1145  else if (delta > 0.6)
1146  ost->sync_opts = llrint(sync_ipts);
1147  break;
1148  case VSYNC_DROP:
1149  case VSYNC_PASSTHROUGH:
1150  ost->sync_opts = llrint(sync_ipts);
1151  break;
1152  default:
1153  av_assert0(0);
1154  }
1155  }
1156 
1157  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1158  nb0_frames = FFMIN(nb0_frames, nb_frames);
1159 
1160  memmove(ost->last_nb0_frames + 1,
1161  ost->last_nb0_frames,
1162  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1163  ost->last_nb0_frames[0] = nb0_frames;
1164 
1165  if (nb0_frames == 0 && ost->last_dropped) {
1166  nb_frames_drop++;
1168  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1169  ost->frame_number, ost->st->index, ost->last_frame->pts);
1170  }
1171  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1172  if (nb_frames > dts_error_threshold * 30) {
1173  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1174  nb_frames_drop++;
1175  return;
1176  }
1177  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1178  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1179  if (nb_frames_dup > dup_warning) {
1180  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1181  dup_warning *= 10;
1182  }
1183  }
1184  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1185 
1186  /* duplicates frame if needed */
1187  for (i = 0; i < nb_frames; i++) {
1188  AVFrame *in_picture;
1189  int forced_keyframe = 0;
1190  double pts_time;
1191  av_init_packet(&pkt);
1192  pkt.data = NULL;
1193  pkt.size = 0;
1194 
1195  if (i < nb0_frames && ost->last_frame) {
1196  in_picture = ost->last_frame;
1197  } else
1198  in_picture = next_picture;
1199 
1200  if (!in_picture)
1201  return;
1202 
1203  in_picture->pts = ost->sync_opts;
1204 
1205  if (!check_recording_time(ost))
1206  return;
1207 
1209  ost->top_field_first >= 0)
1210  in_picture->top_field_first = !!ost->top_field_first;
1211 
1212  if (in_picture->interlaced_frame) {
1213  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1214  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1215  else
1216  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1217  } else
1218  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1219 
1220  in_picture->quality = enc->global_quality;
1221  in_picture->pict_type = 0;
1222 
1223  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1224  in_picture->pts != AV_NOPTS_VALUE)
1225  ost->forced_kf_ref_pts = in_picture->pts;
1226 
1227  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1228  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1229  if (ost->forced_kf_index < ost->forced_kf_count &&
1230  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1231  ost->forced_kf_index++;
1232  forced_keyframe = 1;
1233  } else if (ost->forced_keyframes_pexpr) {
1234  double res;
1235  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1238  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1244  res);
1245  if (res) {
1246  forced_keyframe = 1;
1252  }
1253 
1255  } else if ( ost->forced_keyframes
1256  && !strncmp(ost->forced_keyframes, "source", 6)
1257  && in_picture->key_frame==1
1258  && !i) {
1259  forced_keyframe = 1;
1260  }
1261 
1262  if (forced_keyframe) {
1263  in_picture->pict_type = AV_PICTURE_TYPE_I;
1264  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1265  }
1266 
1268  if (debug_ts) {
1269  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1270  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1271  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1272  enc->time_base.num, enc->time_base.den);
1273  }
1274 
1275  ost->frames_encoded++;
1276 
1277  ret = avcodec_send_frame(enc, in_picture);
1278  if (ret < 0)
1279  goto error;
1280  // Make sure Closed Captions will not be duplicated
1282 
1283  while (1) {
1284  ret = avcodec_receive_packet(enc, &pkt);
1285  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1286  if (ret == AVERROR(EAGAIN))
1287  break;
1288  if (ret < 0)
1289  goto error;
1290 
1291  if (debug_ts) {
1292  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1293  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1294  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1295  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1296  }
1297 
1298  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1299  pkt.pts = ost->sync_opts;
1300 
1301  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1302 
1303  if (debug_ts) {
1304  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1305  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1306  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1307  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1308  }
1309 
1310  frame_size = pkt.size;
1311  output_packet(of, &pkt, ost, 0);
1312 
1313  /* if two pass, output log */
1314  if (ost->logfile && enc->stats_out) {
1315  fprintf(ost->logfile, "%s", enc->stats_out);
1316  }
1317  }
1318  ost->sync_opts++;
1319  /*
1320  * For video, number of frames in == number of packets out.
1321  * But there may be reordering, so we can't throw away frames on encoder
1322  * flush, we need to limit them here, before they go into encoder.
1323  */
1324  ost->frame_number++;
1325 
1326  if (vstats_filename && frame_size)
1327  do_video_stats(ost, frame_size);
1328  }
1329 
1330  if (!ost->last_frame)
1331  ost->last_frame = av_frame_alloc();
1332  av_frame_unref(ost->last_frame);
1333  if (next_picture && ost->last_frame)
1334  av_frame_ref(ost->last_frame, next_picture);
1335  else
1336  av_frame_free(&ost->last_frame);
1337 
1338  return;
1339 error:
1340  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1341  exit_program(1);
1342 }
1343 
1344 static double psnr(double d)
1345 {
1346  return -10.0 * log10(d);
1347 }
1348 
/* Append one per-frame statistics record (frame number, quantizer, PSNR,
 * frame size, cumulative size, bitrates, picture type) to the -vstats log.
 * NOTE(review): the signature line is missing from this extract; from the
 * call sites (do_video_stats(ost, frame_size) / do_video_stats(ost, pkt_size))
 * it takes an OutputStream* and an int frame size — verify against upstream. */
1350 {
1351  AVCodecContext *enc;
1352  int frame_number;
1353  double ti1, bitrate, avg_bitrate;
1354 
1355  /* this is executed just the first time do_video_stats is called */
1356  if (!vstats_file) {
1357  vstats_file = fopen(vstats_filename, "w");
1358  if (!vstats_file) {
1359  perror("fopen");
1360  exit_program(1);
1361  }
1362  }
1363 
1364  enc = ost->enc_ctx;
1365  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 /* frames already written to the output stream, not the encoder's counter */
1366  frame_number = ost->st->nb_frames;
1367  if (vstats_version <= 1) {
1368  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1369  ost->quality / (float)FF_QP2LAMBDA);
1370  } else {
 /* vstats version 2 prefixes each record with output-file/stream indexes */
1371  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1372  ost->quality / (float)FF_QP2LAMBDA);
1373  }
1374 
 /* PSNR is only available when the encoder was run with AV_CODEC_FLAG_PSNR */
1375  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1376  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1377 
1378  fprintf(vstats_file,"f_size= %6d ", frame_size);
1379  /* compute pts value */
1380  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
 /* clamp to avoid a division by (near) zero for the average bitrate below */
1381  if (ti1 < 0.01)
1382  ti1 = 0.01;
1383 
1384  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1385  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1386  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1387  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1388  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1389  }
1390 }
1391 
1392 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1393 
/* Mark an output stream as finished.
 * NOTE(review): the signature line and original line 1399 (which, per the
 * flush_encoders() call site, presumably marked `ost` itself as finished)
 * are missing from this extract — verify against the full source. */
1395 {
1396  OutputFile *of = output_files[ost->file_index];
1397  int i;
1398 
1400 
 /* with -shortest, one stream ending finishes every stream of the file */
1401  if (of->shortest) {
1402  for (i = 0; i < of->ctx->nb_streams; i++)
1403  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1404  }
1405 }
1406 
1407 /**
1408  * Get and encode new output from any of the filtergraphs, without causing
1409  * activity.
1410  *
1411  * @return 0 for success, <0 for severe errors
1412  */
/* Drain every configured buffersink and feed the resulting frames to the
 * audio/video encoders. With flush != 0, EOF from a video sink triggers a
 * final do_video_out(NULL) to flush duplicated/pending frames.
 * NOTE(review): this extract has dropped several lines (e.g. original 1422,
 * the buffersink `filter` variable declaration; 1448, the flags argument of
 * av_buffersink_get_frame_flags; 1451/1454/1499, av_log call openings) —
 * verify against the full source before relying on it. */
1413 static int reap_filters(int flush)
1414 {
1415  AVFrame *filtered_frame = NULL;
1416  int i;
1417 
1418  /* Reap all buffers present in the buffer sinks */
1419  for (i = 0; i < nb_output_streams; i++) {
1420  OutputStream *ost = output_streams[i];
1421  OutputFile *of = output_files[ost->file_index];
1423  AVCodecContext *enc = ost->enc_ctx;
1424  int ret = 0;
1425 
 /* skip streams whose filtergraph is not configured yet */
1426  if (!ost->filter || !ost->filter->graph->graph)
1427  continue;
1428  filter = ost->filter->filter;
1429 
 /* lazily initialize the output stream the first time data arrives */
1430  if (!ost->initialized) {
1431  char error[1024] = "";
1432  ret = init_output_stream(ost, error, sizeof(error));
1433  if (ret < 0) {
1434  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1435  ost->file_index, ost->index, error);
1436  exit_program(1);
1437  }
1438  }
1439 
 /* reuse a single scratch frame per output stream */
1440  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1441  return AVERROR(ENOMEM);
1442  }
1443  filtered_frame = ost->filtered_frame;
1444 
1445  while (1) {
1446  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1447  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1449  if (ret < 0) {
1450  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1452  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1453  } else if (flush && ret == AVERROR_EOF) {
 /* on flush, give the video path a chance to emit its final frames */
1455  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1456  }
1457  break;
1458  }
1459  if (ost->finished) {
1460  av_frame_unref(filtered_frame);
1461  continue;
1462  }
1463  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1464  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1465  AVRational filter_tb = av_buffersink_get_time_base(filter);
1466  AVRational tb = enc->time_base;
 /* widen the timebase denominator so float_pts keeps sub-tick precision */
1467  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1468 
1469  tb.den <<= extra_bits;
1470  float_pts =
1471  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1472  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1473  float_pts /= 1 << extra_bits;
1474  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1475  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1476 
1477  filtered_frame->pts =
1478  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1479  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1480  }
1481 
1482  switch (av_buffersink_get_type(filter)) {
1483  case AVMEDIA_TYPE_VIDEO:
 /* propagate SAR from the filtergraph unless the user forced an aspect */
1484  if (!ost->frame_aspect_ratio.num)
1485  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1486 
1487  if (debug_ts) {
1488  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1489  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1490  float_pts,
1491  enc->time_base.num, enc->time_base.den);
1492  }
1493 
1494  do_video_out(of, ost, filtered_frame, float_pts);
1495  break;
1496  case AVMEDIA_TYPE_AUDIO:
1497  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1498  enc->channels != filtered_frame->channels) {
1500  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1501  break;
1502  }
1503  do_audio_out(of, ost, filtered_frame);
1504  break;
1505  default:
1506  // TODO support subtitle filters
1507  av_assert0(0);
1508  }
1509 
1510  av_frame_unref(filtered_frame);
1511  }
1512  }
1513 
1514  return 0;
1515 }
1516 
/* Print the end-of-run summary: per-type byte totals and muxing overhead at
 * INFO level, then per-stream demux/decode and encode/mux counters at
 * VERBOSE level, and finally a warning if nothing at all was encoded.
 * NOTE(review): original lines 1536-1537 are missing from this extract —
 * presumably the check that clears pass1_used when a two-pass flag is seen;
 * verify against the full source. */
1517 static void print_final_stats(int64_t total_size)
1518 {
1519  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1520  uint64_t subtitle_size = 0;
1521  uint64_t data_size = 0;
1522  float percent = -1.0;
1523  int i, j;
1524  int pass1_used = 1;
1525 
 /* accumulate written bytes by media type */
1526  for (i = 0; i < nb_output_streams; i++) {
1527  OutputStream *ost = output_streams[i];
1528  switch (ost->enc_ctx->codec_type) {
1529  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1530  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1531  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1532  default: other_size += ost->data_size; break;
1533  }
1534  extra_size += ost->enc_ctx->extradata_size;
1535  data_size += ost->data_size;
1538  pass1_used = 0;
1539  }
1540 
 /* muxing overhead as a percentage of the raw payload size */
1541  if (data_size && total_size>0 && total_size >= data_size)
1542  percent = 100.0 * (total_size - data_size) / data_size;
1543 
1544  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1545  video_size / 1024.0,
1546  audio_size / 1024.0,
1547  subtitle_size / 1024.0,
1548  other_size / 1024.0,
1549  extra_size / 1024.0);
1550  if (percent >= 0.0)
1551  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1552  else
1553  av_log(NULL, AV_LOG_INFO, "unknown");
1554  av_log(NULL, AV_LOG_INFO, "\n");
1555 
1556  /* print verbose per-stream stats */
1557  for (i = 0; i < nb_input_files; i++) {
1558  InputFile *f = input_files[i];
1559  uint64_t total_packets = 0, total_size = 0;
1560 
1561  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1562  i, f->ctx->url);
1563 
1564  for (j = 0; j < f->nb_streams; j++) {
1565  InputStream *ist = input_streams[f->ist_index + j];
1566  enum AVMediaType type = ist->dec_ctx->codec_type;
1567 
1568  total_size += ist->data_size;
1569  total_packets += ist->nb_packets;
1570 
1571  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1572  i, j, media_type_string(type));
1573  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1574  ist->nb_packets, ist->data_size);
1575 
1576  if (ist->decoding_needed) {
1577  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1578  ist->frames_decoded);
1579  if (type == AVMEDIA_TYPE_AUDIO)
1580  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1581  av_log(NULL, AV_LOG_VERBOSE, "; ");
1582  }
1583 
1584  av_log(NULL, AV_LOG_VERBOSE, "\n");
1585  }
1586 
1587  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1588  total_packets, total_size);
1589  }
1590 
1591  for (i = 0; i < nb_output_files; i++) {
1592  OutputFile *of = output_files[i];
1593  uint64_t total_packets = 0, total_size = 0;
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1596  i, of->ctx->url);
1597 
1598  for (j = 0; j < of->ctx->nb_streams; j++) {
1599  OutputStream *ost = output_streams[of->ost_index + j];
1600  enum AVMediaType type = ost->enc_ctx->codec_type;
1601 
1602  total_size += ost->data_size;
1603  total_packets += ost->packets_written;
1604 
1605  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1606  i, j, media_type_string(type));
1607  if (ost->encoding_needed) {
1608  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1609  ost->frames_encoded);
1610  if (type == AVMEDIA_TYPE_AUDIO)
1611  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1612  av_log(NULL, AV_LOG_VERBOSE, "; ");
1613  }
1614 
1615  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1616  ost->packets_written, ost->data_size);
1617 
1618  av_log(NULL, AV_LOG_VERBOSE, "\n");
1619  }
1620 
1621  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1622  total_packets, total_size);
1623  }
 /* nothing was encoded at all: warn, and hint at -ss/-t/-frames unless this
  * was only pass 1 of a two-pass run (where an empty output is expected) */
1624  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1625  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1626  if (pass1_used) {
1627  av_log(NULL, AV_LOG_WARNING, "\n");
1628  } else {
1629  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1630  }
1631  }
1632 }
1633 
/* Build and emit the periodic status line ("frame= ... fps= ... bitrate= ...")
 * and, when -progress is in use, the machine-readable key=value report.
 * Rate-limited to one report per 500ms unless is_last_report is set; the last
 * report also triggers print_final_stats().
 * NOTE(review): this extract is missing original lines 1675 (presumably the
 * av_bprint_init of `buf`), 1743, 1791 and 1824 (an av_log call opening) —
 * verify against the full source. */
1634 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1635 {
1636  AVBPrint buf, buf_script;
1637  OutputStream *ost;
1638  AVFormatContext *oc;
1639  int64_t total_size;
1640  AVCodecContext *enc;
1641  int frame_number, vid, i;
1642  double bitrate;
1643  double speed;
1644  int64_t pts = INT64_MIN + 1;
1645  static int64_t last_time = -1;
1646  static int qp_histogram[52];
1647  int hours, mins, secs, us;
1648  const char *hours_sign;
1649  int ret;
1650  float t;
1651 
1652  if (!print_stats && !is_last_report && !progress_avio)
1653  return;
1654 
 /* throttle intermediate reports to at most one every 500ms */
1655  if (!is_last_report) {
1656  if (last_time == -1) {
1657  last_time = cur_time;
1658  return;
1659  }
1660  if ((cur_time - last_time) < 500000)
1661  return;
1662  last_time = cur_time;
1663  }
1664 
 /* wall-clock seconds since transcoding started */
1665  t = (cur_time-timer_start) / 1000000.0;
1666 
1667 
1668  oc = output_files[0]->ctx;
1669 
1670  total_size = avio_size(oc->pb);
1671  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1672  total_size = avio_tell(oc->pb);
1673 
1674  vid = 0;
1676  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1677  for (i = 0; i < nb_output_streams; i++) {
1678  float q = -1;
1679  ost = output_streams[i];
1680  enc = ost->enc_ctx;
1681  if (!ost->stream_copy)
1682  q = ost->quality / (float) FF_QP2LAMBDA;
1683 
 /* secondary video streams only contribute their quantizer */
1684  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1685  av_bprintf(&buf, "q=%2.1f ", q);
1686  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1687  ost->file_index, ost->index, q);
1688  }
 /* the first video stream drives frame/fps/PSNR reporting */
1689  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1690  float fps;
1691 
1692  frame_number = ost->frame_number;
1693  fps = t > 1 ? frame_number / t : 0;
1694  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1695  frame_number, fps < 9.95, fps, q);
1696  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1697  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1698  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699  ost->file_index, ost->index, q);
1700  if (is_last_report)
1701  av_bprintf(&buf, "L");
1702  if (qp_hist) {
1703  int j;
1704  int qp = lrintf(q);
1705  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1706  qp_histogram[qp]++;
1707  for (j = 0; j < 32; j++)
1708  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1709  }
1710 
1711  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1712  int j;
1713  double error, error_sum = 0;
1714  double scale, scale_sum = 0;
1715  double p;
1716  char type[3] = { 'Y','U','V' };
1717  av_bprintf(&buf, "PSNR=");
1718  for (j = 0; j < 3; j++) {
1719  if (is_last_report) {
1720  error = enc->error[j];
1721  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1722  } else {
1723  error = ost->error[j];
1724  scale = enc->width * enc->height * 255.0 * 255.0;
1725  }
 /* chroma planes are subsampled: quarter the scale for U and V */
1726  if (j)
1727  scale /= 4;
1728  error_sum += error;
1729  scale_sum += scale;
1730  p = psnr(error / scale);
1731  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1732  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1733  ost->file_index, ost->index, type[j] | 32, p);
1734  }
1735  p = psnr(error_sum / scale_sum);
1736  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1737  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1738  ost->file_index, ost->index, p);
1739  }
1740  vid = 1;
1741  }
1742  /* compute min output value */
1744  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1745  ost->st->time_base, AV_TIME_BASE_Q));
1746  if (is_last_report)
1747  nb_frames_drop += ost->last_dropped;
1748  }
1749 
 /* split the output position into sign/h/m/s/us for display */
1750  secs = FFABS(pts) / AV_TIME_BASE;
1751  us = FFABS(pts) % AV_TIME_BASE;
1752  mins = secs / 60;
1753  secs %= 60;
1754  hours = mins / 60;
1755  mins %= 60;
1756  hours_sign = (pts < 0) ? "-" : "";
1757 
1758  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1759  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1760 
1761  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1762  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1763  if (pts == AV_NOPTS_VALUE) {
1764  av_bprintf(&buf, "N/A ");
1765  } else {
1766  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1767  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1768  }
1769 
1770  if (bitrate < 0) {
1771  av_bprintf(&buf, "bitrate=N/A");
1772  av_bprintf(&buf_script, "bitrate=N/A\n");
1773  }else{
1774  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1775  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1776  }
1777 
1778  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1779  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1780  if (pts == AV_NOPTS_VALUE) {
1781  av_bprintf(&buf_script, "out_time_us=N/A\n");
1782  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1783  av_bprintf(&buf_script, "out_time=N/A\n");
1784  } else {
1785  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1786  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1787  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1788  hours_sign, hours, mins, secs, us);
1789  }
1790 
1792  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1793  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1794  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1795 
1796  if (speed < 0) {
1797  av_bprintf(&buf, " speed=N/A");
1798  av_bprintf(&buf_script, "speed=N/A\n");
1799  } else {
1800  av_bprintf(&buf, " speed=%4.3gx", speed);
1801  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1802  }
1803 
 /* intermediate reports end with '\r' so they overwrite in place */
1804  if (print_stats || is_last_report) {
1805  const char end = is_last_report ? '\n' : '\r';
1806  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1807  fprintf(stderr, "%s %c", buf.str, end);
1808  } else
1809  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1810 
1811  fflush(stderr);
1812  }
1813  av_bprint_finalize(&buf, NULL);
1814 
1815  if (progress_avio) {
1816  av_bprintf(&buf_script, "progress=%s\n",
1817  is_last_report ? "end" : "continue");
1818  avio_write(progress_avio, buf_script.str,
1819  FFMIN(buf_script.len, buf_script.size - 1));
1820  avio_flush(progress_avio);
1821  av_bprint_finalize(&buf_script, NULL);
1822  if (is_last_report) {
1823  if ((ret = avio_closep(&progress_avio)) < 0)
1825  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1826  }
1827  }
1828 
1829  if (is_last_report)
1830  print_final_stats(total_size);
1831 }
1832 
/* Seed an InputFilter's media parameters from demuxer-level codec parameters
 * when no decoded frame ever arrived, so the filtergraph can be configured.
 * NOTE(review): the signature line is missing from this extract; from the
 * call sites this is ifilter_parameters_from_codecpar(InputFilter *ifilter,
 * AVCodecParameters *par) — verify against the full source. */
1834 {
1835  // We never got any input. Set a fake format, which will
1836  // come from libavformat.
1837  ifilter->format = par->format;
1838  ifilter->sample_rate = par->sample_rate;
1839  ifilter->channels = par->channels;
1840  ifilter->channel_layout = par->channel_layout;
1841  ifilter->width = par->width;
1842  ifilter->height = par->height;
1843  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1844 }
1845 
/* Drain every encoder at end of stream: send a NULL (flush) frame, pull all
 * remaining packets and hand them to the muxer. Streams that were never
 * initialized get their filtergraph configured (with fallback parameters)
 * and their stream initialized first, so the output file still gets a
 * valid, if empty, stream.
 * NOTE(review): this extract is missing several lines (originals 1864, 1876,
 * 1896, 1919, 1952 — among them the av_log opening before "Finishing stream"
 * and the codec-type guard before the drain loop) — verify against the full
 * source. */
1846 static void flush_encoders(void)
1847 {
1848  int i, ret;
1849 
1850  for (i = 0; i < nb_output_streams; i++) {
1851  OutputStream *ost = output_streams[i];
1852  AVCodecContext *enc = ost->enc_ctx;
1853  OutputFile *of = output_files[ost->file_index];
1854 
1855  if (!ost->encoding_needed)
1856  continue;
1857 
1858  // Try to enable encoding with no input frames.
1859  // Maybe we should just let encoding fail instead.
1860  if (!ost->initialized) {
1861  FilterGraph *fg = ost->filter->graph;
1862  char error[1024] = "";
1863 
1865  "Finishing stream %d:%d without any data written to it.\n",
1866  ost->file_index, ost->st->index);
1867 
1868  if (ost->filter && !fg->graph) {
 /* no frame ever reached the graph: fall back to demuxer parameters */
1869  int x;
1870  for (x = 0; x < fg->nb_inputs; x++) {
1871  InputFilter *ifilter = fg->inputs[x];
1872  if (ifilter->format < 0)
1873  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1874  }
1875 
1877  continue;
1878 
1879  ret = configure_filtergraph(fg);
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1882  exit_program(1);
1883  }
1884 
1885  finish_output_stream(ost);
1886  }
1887 
1888  ret = init_output_stream(ost, error, sizeof(error));
1889  if (ret < 0) {
1890  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891  ost->file_index, ost->index, error);
1892  exit_program(1);
1893  }
1894  }
1895 
1897  continue;
1898 
 /* drain loop: one iteration per packet still buffered in the encoder */
1899  for (;;) {
1900  const char *desc = NULL;
1901  AVPacket pkt;
1902  int pkt_size;
1903 
1904  switch (enc->codec_type) {
1905  case AVMEDIA_TYPE_AUDIO:
1906  desc = "audio";
1907  break;
1908  case AVMEDIA_TYPE_VIDEO:
1909  desc = "video";
1910  break;
1911  default:
1912  av_assert0(0);
1913  }
1914 
1915  av_init_packet(&pkt);
1916  pkt.data = NULL;
1917  pkt.size = 0;
1918 
1920 
 /* EAGAIN here means the encoder wants input; feed NULL to start flushing */
1921  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1922  ret = avcodec_send_frame(enc, NULL);
1923  if (ret < 0) {
1924  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1925  desc,
1926  av_err2str(ret));
1927  exit_program(1);
1928  }
1929  }
1930 
1931  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1932  if (ret < 0 && ret != AVERROR_EOF) {
1933  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1934  desc,
1935  av_err2str(ret));
1936  exit_program(1);
1937  }
 /* two-pass: append the encoder's stats for this stream */
1938  if (ost->logfile && enc->stats_out) {
1939  fprintf(ost->logfile, "%s", enc->stats_out);
1940  }
 /* EOF: emit the final (EOF-marking) packet and stop draining */
1941  if (ret == AVERROR_EOF) {
1942  output_packet(of, &pkt, ost, 1);
1943  break;
1944  }
1945  if (ost->finished & MUXER_FINISHED) {
1946  av_packet_unref(&pkt);
1947  continue;
1948  }
1949  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1950  pkt_size = pkt.size;
1951  output_packet(of, &pkt, ost, 0);
1953  do_video_stats(ost, pkt_size);
1954  }
1955  }
1956  }
1957 }
1958 
1959 /*
1960  * Check whether a packet from ist should be written into ost at this time
1961  */
/* Returns 1 if the packet may be forwarded, 0 otherwise.
 * NOTE(review): the signature line is missing from this extract; from the
 * body this takes (InputStream *ist, OutputStream *ost) — verify against
 * the full source. */
1963 {
1964  OutputFile *of = output_files[ost->file_index];
1965  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1966 
 /* this output stream is fed by a different input stream */
1967  if (ost->source_index != ist_index)
1968  return 0;
1969 
1970  if (ost->finished)
1971  return 0;
1972 
 /* still before the requested output start time (-ss on the output) */
1973  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1974  return 0;
1975 
1976  return 1;
1977 }
1978 
/* Forward one packet from an input stream to a stream-copy (-c copy) output:
 * apply start-time / recording-time windowing, rescale timestamps into the
 * muxer timebase and pass the packet to output_packet(). pkt == NULL flushes
 * the output bitstream filters.
 * NOTE(review): this extract is missing original lines 1997 (second half of
 * the leading-keyframe condition, presumably the copy_initial_nonkeyframes
 * check), 2039 (the `duration` declaration used below) and 2044 (the last
 * arguments of av_rescale_delta) — verify against the full source. */
1979 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1980 {
1981  OutputFile *of = output_files[ost->file_index];
1982  InputFile *f = input_files [ist->file_index];
1983  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1984  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1985  AVPacket opkt;
1986 
1987  // EOF: flush output bitstream filters.
1988  if (!pkt) {
1989  av_init_packet(&opkt);
1990  opkt.data = NULL;
1991  opkt.size = 0;
1992  output_packet(of, &opkt, ost, 1);
1993  return;
1994  }
1995 
 /* drop leading non-keyframe packets (condition continues on a dropped line) */
1996  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1998  return;
1999 
 /* drop packets that predate the output start point */
2000  if (!ost->frame_number && !ost->copy_prior_start) {
2001  int64_t comp_start = start_time;
2002  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2003  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2004  if (pkt->pts == AV_NOPTS_VALUE ?
2005  ist->pts < comp_start :
2006  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2007  return;
2008  }
2009 
 /* output-level -t reached: close this stream */
2010  if (of->recording_time != INT64_MAX &&
2011  ist->pts >= of->recording_time + start_time) {
2012  close_output_stream(ost);
2013  return;
2014  }
2015 
 /* input-level -t reached: likewise */
2016  if (f->recording_time != INT64_MAX) {
2017  start_time = f->ctx->start_time;
2018  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2019  start_time += f->start_time;
2020  if (ist->pts >= f->recording_time + start_time) {
2021  close_output_stream(ost);
2022  return;
2023  }
2024  }
2025 
2026  /* force the input stream PTS */
2027  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2028  ost->sync_opts++;
2029 
2030  if (av_packet_ref(&opkt, pkt) < 0)
2031  exit_program(1);
2032 
2033  if (pkt->pts != AV_NOPTS_VALUE)
2034  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2035 
2036  if (pkt->dts == AV_NOPTS_VALUE) {
 /* no DTS on the packet: fall back to the input stream's running DTS */
2037  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2038  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2040  if(!duration)
2041  duration = ist->dec_ctx->frame_size;
2042  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2043  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2045  /* dts will be set immediately afterwards to what pts is now */
2046  opkt.pts = opkt.dts - ost_tb_start_time;
2047  } else
2048  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2049  opkt.dts -= ost_tb_start_time;
2050 
2051  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2052 
2053  output_packet(of, &opkt, ost, 0);
2054 }
2055 
/* Ensure an input audio decoder has a channel layout, guessing a default one
 * from the channel count when possible. Returns 1 on success (layout present
 * or guessed), 0 when no layout could be determined.
 * NOTE(review): the signature line is missing from this extract (from the
 * body it takes an InputStream *ist), and original line 2065 — presumably the
 * av_get_default_channel_layout() assignment — is missing too; verify against
 * the full source. */
2057 {
2058  AVCodecContext *dec = ist->dec_ctx;
2059 
2060  if (!dec->channel_layout) {
2061  char layout_name[256];
2062 
 /* respect the user's -guess_layout_max cap on guessing */
2063  if (dec->channels > ist->guess_layout_max)
2064  return 0;
2066  if (!dec->channel_layout)
2067  return 0;
2068  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2069  dec->channels, dec->channel_layout);
2070  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2071  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2072  }
2073  return 1;
2074 }
2075 
/* Book-keeping after each decode attempt: count successes/failures in
 * decode_error_stat[], abort on error when -xerror is set, and (for decoded
 * frames) warn or abort on corruption.
 * NOTE(review): original lines 2085-2086 — the corruption condition and the
 * av_log opening for the "corrupt decoded frame" message — are missing from
 * this extract; verify against the full source. */
2076 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2077 {
 /* index 0 counts successful decodes, index 1 counts failures */
2078  if (*got_output || ret<0)
2079  decode_error_stat[ret<0] ++;
2080 
2081  if (ret < 0 && exit_on_error)
2082  exit_program(1);
2083 
2084  if (*got_output && ist) {
2087  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2088  if (exit_on_error)
2089  exit_program(1);
2090  }
2091  }
2092 }
2093 
2094 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the filtergraph has a known
 * format (>= 0), 0 otherwise.
 * NOTE(review): the signature line is missing from this extract; from the
 * call sites this is ifilter_has_all_input_formats(FilterGraph *fg) —
 * verify against the full source. */
2096 {
2097  int i;
2098  for (i = 0; i < fg->nb_inputs; i++) {
 /* only audio and video inputs need a negotiated format */
2099  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2100  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2101  return 0;
2102  }
2103  return 1;
2104 }
2105 
/* Push one decoded frame into an input filter. Detects input parameter
 * changes (format, audio layout, video size, hw frames context), buffers the
 * frame when the graph cannot be (re)configured yet, and otherwise
 * (re)configures the graph before submitting the frame.
 * NOTE(review): the signature line is missing from this extract (from the
 * body: InputFilter *ifilter, AVFrame *frame), as is original line 2173 —
 * the av_buffersrc_add_frame_flags() call whose result is checked right
 * below; verify against the full source. */
2107 {
2108  FilterGraph *fg = ifilter->graph;
2109  int need_reinit, ret, i;
2110 
2111  /* determine if the parameters for this input changed */
2112  need_reinit = ifilter->format != frame->format;
2113 
2114  switch (ifilter->ist->st->codecpar->codec_type) {
2115  case AVMEDIA_TYPE_AUDIO:
2116  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2117  ifilter->channels != frame->channels ||
2118  ifilter->channel_layout != frame->channel_layout;
2119  break;
2120  case AVMEDIA_TYPE_VIDEO:
2121  need_reinit |= ifilter->width != frame->width ||
2122  ifilter->height != frame->height;
2123  break;
2124  }
2125 
 /* honor -reinit_filter 0: keep the configured graph despite changes */
2126  if (!ifilter->ist->reinit_filters && fg->graph)
2127  need_reinit = 0;
2128 
 /* a change of hw frames context always forces a reinit */
2129  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2130  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2131  need_reinit = 1;
2132 
2133  if (need_reinit) {
2134  ret = ifilter_parameters_from_frame(ifilter, frame);
2135  if (ret < 0)
2136  return ret;
2137  }
2138 
2139  /* (re)init the graph if possible, otherwise buffer the frame and return */
2140  if (need_reinit || !fg->graph) {
2141  for (i = 0; i < fg->nb_inputs; i++) {
2142  if (!ifilter_has_all_input_formats(fg)) {
 /* not all inputs known yet: queue a clone of the frame for later */
2143  AVFrame *tmp = av_frame_clone(frame);
2144  if (!tmp)
2145  return AVERROR(ENOMEM);
2146  av_frame_unref(frame);
2147 
 /* grow the FIFO when full before enqueueing the frame pointer */
2148  if (!av_fifo_space(ifilter->frame_queue)) {
2149  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2150  if (ret < 0) {
2151  av_frame_free(&tmp);
2152  return ret;
2153  }
2154  }
2155  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2156  return 0;
2157  }
2158  }
2159 
 /* drain the old graph before tearing it down and reconfiguring */
2160  ret = reap_filters(1);
2161  if (ret < 0 && ret != AVERROR_EOF) {
2162  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2163  return ret;
2164  }
2165 
2166  ret = configure_filtergraph(fg);
2167  if (ret < 0) {
2168  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2169  return ret;
2170  }
2171  }
2172 
2174  if (ret < 0) {
2175  if (ret != AVERROR_EOF)
2176  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2177  return ret;
2178  }
2179 
2180  return 0;
2181 }
2182 
2183 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2184 {
2185  int ret;
2186 
2187  ifilter->eof = 1;
2188 
2189  if (ifilter->filter) {
2190  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2191  if (ret < 0)
2192  return ret;
2193  } else {
2194  // the filtergraph was never configured
2195  if (ifilter->format < 0)
2196  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2197  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2198  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2199  return AVERROR_INVALIDDATA;
2200  }
2201  }
2202 
2203  return 0;
2204 }
2205 
2206 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2207 // There is the following difference: if you got a frame, you must call
2208 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2209 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2210 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2211 {
2212  int ret;
2213 
2214  *got_frame = 0;
2215 
2216  if (pkt) {
2217  ret = avcodec_send_packet(avctx, pkt);
2218  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2219  // decoded frames with avcodec_receive_frame() until done.
2220  if (ret < 0 && ret != AVERROR_EOF)
2221  return ret;
2222  }
2223 
2224  ret = avcodec_receive_frame(avctx, frame);
2225  if (ret < 0 && ret != AVERROR(EAGAIN))
2226  return ret;
2227  if (ret >= 0)
2228  *got_frame = 1;
2229 
2230  return 0;
2231 }
2232 
/*
 * Send one decoded frame to every filtergraph input fed by this stream.
 * All filters except the last receive a fresh reference (ist->filter_frame);
 * the final filter consumes decoded_frame itself, saving one ref.
 * AVERROR_EOF from a filter is ignored; any other error aborts the loop.
 * Returns 0 on success or the first negative error encountered.
 * NOTE(review): the signature line is absent from this excerpt; the
 * parameters are the InputStream (ist) and the AVFrame (decoded_frame).
 */
2234 {
2235  int i, ret;
2236  AVFrame *f;
2237 
2238  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2239  for (i = 0; i < ist->nb_filters; i++) {
2240  if (i < ist->nb_filters - 1) {
2241  f = ist->filter_frame;
2242  ret = av_frame_ref(f, decoded_frame);
2243  if (ret < 0)
2244  break;
2245  } else
2246  f = decoded_frame;
2247  ret = ifilter_send_frame(ist->filters[i], f);
2248  if (ret == AVERROR_EOF)
2249  ret = 0; /* ignore */
2250  if (ret < 0) {
 /* NOTE(review): the av_log(...) call opening this message is absent
  * from this excerpt. */
2252  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2253  break;
2254  }
2255  }
2256  return ret;
2257 }
2258 
/*
 * Decode one audio packet (or pull more output with pkt == NULL), derive a
 * pts for the decoded frame and hand it to the filters.  Sets *got_output
 * when a frame was produced and *decode_failed when the decoder itself
 * errored (as opposed to post-processing).  Returns the decode result, or
 * the filter-injection error if that failed.
 * NOTE(review): the opening of the signature and the local
 * AVFrame *decoded_frame declaration are absent from this excerpt.
 */
2260  int *decode_failed)
2261 {
2263  AVCodecContext *avctx = ist->dec_ctx;
2264  int ret, err = 0;
2265  AVRational decoded_frame_tb;
2266 
 /* Lazily allocate the per-stream scratch frames. */
2267  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2268  return AVERROR(ENOMEM);
2269  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2270  return AVERROR(ENOMEM);
2271  decoded_frame = ist->decoded_frame;
2272 
2274  ret = decode(avctx, decoded_frame, got_output, pkt);
2275  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2276  if (ret < 0)
2277  *decode_failed = 1;
2278 
2279  if (ret >= 0 && avctx->sample_rate <= 0) {
2280  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2281  ret = AVERROR_INVALIDDATA;
2282  }
2283 
2284  if (ret != AVERROR_EOF)
2285  check_decode_result(ist, got_output, ret);
2286 
2287  if (!*got_output || ret < 0)
2288  return ret;
2289 
2290  ist->samples_decoded += decoded_frame->nb_samples;
2291  ist->frames_decoded++;
2292 
2293  /* increment next_dts to use for the case where the input stream does not
2294  have timestamps or there are multiple frames in the packet */
2295  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2296  avctx->sample_rate;
2297  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2298  avctx->sample_rate;
2299 
 /* Pick a pts source in order of preference: the frame's own pts (stream
  * time base), the packet pts (stream time base), or the stream's current
  * dts (AV_TIME_BASE units). */
2300  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2301  decoded_frame_tb = ist->st->time_base;
2302  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2303  decoded_frame->pts = pkt->pts;
2304  decoded_frame_tb = ist->st->time_base;
2305  }else {
2306  decoded_frame->pts = ist->dts;
2307  decoded_frame_tb = AV_TIME_BASE_Q;
2308  }
 /* Rescale to a 1/sample_rate time base, carrying rounding error across
  * frames via filter_in_rescale_delta_last to avoid drift. */
2309  if (decoded_frame->pts != AV_NOPTS_VALUE)
2310  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2311  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2312  (AVRational){1, avctx->sample_rate});
2313  ist->nb_samples = decoded_frame->nb_samples;
2314  err = send_frame_to_filters(ist, decoded_frame);
2315 
 /* NOTE(review): the av_frame_unref(ist->filter_frame) line appears to be
  * absent from this excerpt. */
2317  av_frame_unref(decoded_frame);
2318  return err < 0 ? err : ret;
2319 }
2320 
/*
 * Decode one video packet (or drain at EOF), reconstruct timestamps
 * (best-effort pts, buffered dts for frames emitted at EOF, synthetic CFR
 * pts when -r forces a frame rate), download hwaccel surfaces when needed,
 * and forward the frame to the filters.  *duration_pts returns the frame
 * duration in stream time base; *got_output / *decode_failed as in
 * decode_audio().
 * NOTE(review): the local AVFrame *decoded_frame declaration and a couple
 * of av_log/update_benchmark lines are absent from this excerpt.
 */
2321 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2322  int *decode_failed)
2323 {
2325  int i, ret = 0, err = 0;
2326  int64_t best_effort_timestamp;
2327  int64_t dts = AV_NOPTS_VALUE;
2328  AVPacket avpkt;
2329 
2330  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2331  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2332  // skip the packet.
2333  if (!eof && pkt && pkt->size == 0)
2334  return 0;
2335 
2336  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2337  return AVERROR(ENOMEM);
2338  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2339  return AVERROR(ENOMEM);
2340  decoded_frame = ist->decoded_frame;
2341  if (ist->dts != AV_NOPTS_VALUE)
2342  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2343  if (pkt) {
2344  avpkt = *pkt;
2345  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2346  }
2347 
2348  // The old code used to set dts on the drain packet, which does not work
2349  // with the new API anymore.
 // Instead, remember the dts in a growable buffer and use it as a pts
 // fallback for frames the decoder emits while draining.
2350  if (eof) {
2351  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2352  if (!new)
2353  return AVERROR(ENOMEM);
2354  ist->dts_buffer = new;
2355  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2356  }
2357 
2359  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2360  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2361  if (ret < 0)
2362  *decode_failed = 1;
2363 
2364  // The following line may be required in some cases where there is no parser
2365  // or the parser does not has_b_frames correctly
2366  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2367  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2368  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2369  } else
 /* NOTE(review): the av_log(...) call opening this warning is absent
  * from this excerpt. */
2371  "video_delay is larger in decoder than demuxer %d > %d.\n"
2372  "If you want to help, upload a sample "
2373  "of this file to https://streams.videolan.org/upload/ "
2374  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2375  ist->dec_ctx->has_b_frames,
2376  ist->st->codecpar->video_delay);
2377  }
2378 
2379  if (ret != AVERROR_EOF)
2380  check_decode_result(ist, got_output, ret);
2381 
 /* Purely diagnostic: report decoder-context vs frame parameter drift. */
2382  if (*got_output && ret >= 0) {
2383  if (ist->dec_ctx->width != decoded_frame->width ||
2384  ist->dec_ctx->height != decoded_frame->height ||
2385  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2386  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2387  decoded_frame->width,
2388  decoded_frame->height,
2389  decoded_frame->format,
2390  ist->dec_ctx->width,
2391  ist->dec_ctx->height,
2392  ist->dec_ctx->pix_fmt);
2393  }
2394  }
2395 
2396  if (!*got_output || ret < 0)
2397  return ret;
2398 
2399  if(ist->top_field_first>=0)
2400  decoded_frame->top_field_first = ist->top_field_first;
2401 
2402  ist->frames_decoded++;
2403 
 /* Download the frame from hw memory when a retrieval hook is installed. */
2404  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2405  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2406  if (err < 0)
2407  goto fail;
2408  }
2409  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2410 
2411  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2412  *duration_pts = decoded_frame->pkt_duration;
2413 
 /* With a forced frame rate (-r on input) timestamps are synthesized. */
2414  if (ist->framerate.num)
2415  best_effort_timestamp = ist->cfr_next_pts++;
2416 
 /* While draining, fall back to the oldest buffered packet dts. */
2417  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2418  best_effort_timestamp = ist->dts_buffer[0];
2419 
2420  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2421  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2422  ist->nb_dts_buffer--;
2423  }
2424 
2425  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2426  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2427 
2428  if (ts != AV_NOPTS_VALUE)
2429  ist->next_pts = ist->pts = ts;
2430  }
2431 
2432  if (debug_ts) {
2433  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2434  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2435  ist->st->index, av_ts2str(decoded_frame->pts),
2436  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2437  best_effort_timestamp,
2438  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2439  decoded_frame->key_frame, decoded_frame->pict_type,
2440  ist->st->time_base.num, ist->st->time_base.den);
2441  }
2442 
2443  if (ist->st->sample_aspect_ratio.num)
2444  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2445 
2446  err = send_frame_to_filters(ist, decoded_frame);
2447 
2448 fail:
 /* NOTE(review): the av_frame_unref(ist->filter_frame) line appears to be
  * absent from this excerpt. */
2450  av_frame_unref(decoded_frame);
2451  return err < 0 ? err : ret;
2452 }
2453 
/*
 * Decode one subtitle packet and dispatch the result: with
 * -fix_sub_duration, clamp the previous subtitle's display time against the
 * new one (processing is delayed by one subtitle via ist->prev_sub); feed
 * sub2video when the stream renders into a video filtergraph (queueing
 * subtitles that arrive before the graph exists); finally encode the
 * subtitle to every subtitle output fed by this stream.
 * NOTE(review): the opening of the signature and the local AVSubtitle
 * declaration are absent from this excerpt.
 */
2455  int *decode_failed)
2456 {
2458  int free_sub = 1;
2459  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2460  &subtitle, got_output, pkt);
2461 
2462  check_decode_result(NULL, got_output, ret);
2463 
2464  if (ret < 0 || !*got_output) {
2465  *decode_failed = 1;
 /* A zero-size packet marks EOF: flush pending sub2video state. */
2466  if (!pkt->size)
2467  sub2video_flush(ist);
2468  return ret;
2469  }
2470 
2471  if (ist->fix_sub_duration) {
2472  int end = 1;
2473  if (ist->prev_sub.got_output) {
 /* Elapsed time since the previous subtitle, in milliseconds. */
2474  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2475  1000, AV_TIME_BASE);
2476  if (end < ist->prev_sub.subtitle.end_display_time) {
2477  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2478  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): the argument line(s) carrying the old duration and
  * the assignment shortening it are absent from this excerpt. */
2480  end <= 0 ? ", dropping it" : "");
2482  }
2483  }
 /* Swap current and previous: the previous subtitle (now with a fixed
  * duration) is what gets processed below. */
2484  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2485  FFSWAP(int, ret, ist->prev_sub.ret);
2486  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2487  if (end <= 0)
2488  goto out;
2489  }
2490 
2491  if (!*got_output)
2492  return ret;
2493 
2494  if (ist->sub2video.frame) {
2495  sub2video_update(ist, INT64_MIN, &subtitle);
2496  } else if (ist->nb_filters) {
 /* Filtergraph not up yet: queue the subtitle (ownership moves to the
  * FIFO, hence free_sub = 0). */
2497  if (!ist->sub2video.sub_queue)
2498  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2499  if (!ist->sub2video.sub_queue)
2500  exit_program(1);
2501  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): the av_fifo_grow(...) call is absent from this
  * excerpt; only its error check remains. */
2503  if (ret < 0)
2504  exit_program(1);
2505  }
2506  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2507  free_sub = 0;
2508  }
2509 
2510  if (!subtitle.num_rects)
2511  goto out;
2512 
2513  ist->frames_decoded++;
2514 
2515  for (i = 0; i < nb_output_streams; i++) {
2516  OutputStream *ost = output_streams[i];
2517 
2518  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2519  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2520  continue;
2521 
2522  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2523  }
2524 
2525 out:
2526  if (free_sub)
2527  avsubtitle_free(&subtitle);
2528  return ret;
2529 }
2530 
/*
 * Signal EOF on every filtergraph input attached to this input stream,
 * forwarding the stream's final pts rescaled to its stream time base.
 * Returns 0 or the first negative error from ifilter_send_eof().
 * NOTE(review): the signature line (parameter: InputStream *ist) and the
 * rounding-flags argument of av_rescale_q_rnd() are absent from this
 * excerpt.
 */
2532 {
2533  int i, ret;
2534  /* TODO keep pts also in stream time base to avoid converting back */
2535  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2537 
2538  for (i = 0; i < ist->nb_filters; i++) {
2539  ret = ifilter_send_eof(ist->filters[i], pts);
2540  if (ret < 0)
2541  return ret;
2542  }
2543  return 0;
2544 }
2545 
2546 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one packet of an input stream, or flush its decoder at EOF.
 *
 * For decoded streams, loops over decode_audio/decode_video/
 * transcode_subtitles until the decoder stops producing output, keeping the
 * stream's pts/dts predictions (next_pts/next_dts) up to date.  When
 * draining completes, sends EOF into the attached filtergraphs unless
 * no_eof is set (used by -stream_loop, which flushes without ending the
 * graphs).  For stream-copied streams, advances the timestamp predictions
 * from packet durations / frame rates and forwards the packet to every
 * matching output via do_streamcopy().
 * Returns 0 once EOF has been reached for this stream, nonzero otherwise.
 */
2547 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2548 {
2549  int ret = 0, i;
2550  int repeating = 0;
2551  int eof_reached = 0;
2552 
2553  AVPacket avpkt;
 /* First packet: seed dts/pts.  Streams with B-frames start at a negative
  * dts so the first frame's pts can come out at zero. */
2554  if (!ist->saw_first_ts) {
2555  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2556  ist->pts = 0;
2557  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2558  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2559  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2560  }
2561  ist->saw_first_ts = 1;
2562  }
2563 
2564  if (ist->next_dts == AV_NOPTS_VALUE)
2565  ist->next_dts = ist->dts;
2566  if (ist->next_pts == AV_NOPTS_VALUE)
2567  ist->next_pts = ist->pts;
2568 
2569  if (!pkt) {
2570  /* EOF handling */
2571  av_init_packet(&avpkt);
2572  avpkt.data = NULL;
2573  avpkt.size = 0;
2574  } else {
2575  avpkt = *pkt;
2576  }
2577 
 /* A packet dts resynchronizes the prediction (in AV_TIME_BASE units). */
2578  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2579  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2580  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2581  ist->next_pts = ist->pts = ist->dts;
2582  }
2583 
2584  // while we have more to decode or while the decoder did output something on EOF
2585  while (ist->decoding_needed) {
2586  int64_t duration_dts = 0;
2587  int64_t duration_pts = 0;
2588  int got_output = 0;
2589  int decode_failed = 0;
2590 
2591  ist->pts = ist->next_pts;
2592  ist->dts = ist->next_dts;
2593 
2594  switch (ist->dec_ctx->codec_type) {
2595  case AVMEDIA_TYPE_AUDIO:
 /* repeating: pass NULL to pull further frames from the decoder. */
2596  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2597  &decode_failed);
2598  break;
2599  case AVMEDIA_TYPE_VIDEO:
2600  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2601  &decode_failed);
2602  if (!repeating || !pkt || got_output) {
2603  if (pkt && pkt->duration) {
2604  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2605  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): the "int ticks = ..." line (repeat_pict /
  * ticks_per_frame) and the division's denominator line are absent
  * from this excerpt. */
2607  duration_dts = ((int64_t)AV_TIME_BASE *
2608  ist->dec_ctx->framerate.den * ticks) /
2610  }
2611 
2612  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2613  ist->next_dts += duration_dts;
2614  }else
2615  ist->next_dts = AV_NOPTS_VALUE;
2616  }
2617 
 /* Prefer the decoded frame's own duration over the dts estimate. */
2618  if (got_output) {
2619  if (duration_pts > 0) {
2620  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2621  } else {
2622  ist->next_pts += duration_dts;
2623  }
2624  }
2625  break;
2626  case AVMEDIA_TYPE_SUBTITLE:
2627  if (repeating)
2628  break;
2629  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2630  if (!pkt && ret >= 0)
2631  ret = AVERROR_EOF;
2632  break;
2633  default:
2634  return -1;
2635  }
2636 
2637  if (ret == AVERROR_EOF) {
2638  eof_reached = 1;
2639  break;
2640  }
2641 
2642  if (ret < 0) {
2643  if (decode_failed) {
2644  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2645  ist->file_index, ist->st->index, av_err2str(ret));
2646  } else {
2647  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2648  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2649  }
2650  if (!decode_failed || exit_on_error)
2651  exit_program(1);
2652  break;
2653  }
2654 
2655  if (got_output)
2656  ist->got_output = 1;
2657 
2658  if (!got_output)
2659  break;
2660 
2661  // During draining, we might get multiple output frames in this loop.
2662  // ffmpeg.c does not drain the filter chain on configuration changes,
2663  // which means if we send multiple frames at once to the filters, and
2664  // one of those frames changes configuration, the buffered frames will
2665  // be lost. This can upset certain FATE tests.
2666  // Decode only 1 frame per call on EOF to appease these FATE tests.
2667  // The ideal solution would be to rewrite decoding to use the new
2668  // decoding API in a better way.
2669  if (!pkt)
2670  break;
2671 
2672  repeating = 1;
2673  }
2674 
2675  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2676  /* except when looping we need to flush but not to send an EOF */
2677  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2678  int ret = send_filter_eof(ist);
2679  if (ret < 0) {
2680  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2681  exit_program(1);
2682  }
2683  }
2684 
2685  /* handle stream copy */
2686  if (!ist->decoding_needed && pkt) {
2687  ist->dts = ist->next_dts;
2688  switch (ist->dec_ctx->codec_type) {
2689  case AVMEDIA_TYPE_AUDIO:
2690  av_assert1(pkt->duration >= 0);
2691  if (ist->dec_ctx->sample_rate) {
2692  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2693  ist->dec_ctx->sample_rate;
2694  } else {
2695  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2696  }
2697  break;
2698  case AVMEDIA_TYPE_VIDEO:
2699  if (ist->framerate.num) {
2700  // TODO: Remove work-around for c99-to-c89 issue 7
2701  AVRational time_base_q = AV_TIME_BASE_Q;
 /* Snap next_dts to the forced frame-rate grid, then advance one. */
2702  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2703  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2704  } else if (pkt->duration) {
2705  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2706  } else if(ist->dec_ctx->framerate.num != 0) {
2707  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2708  ist->next_dts += ((int64_t)AV_TIME_BASE *
2709  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): the denominator line of this division is absent
  * from this excerpt. */
2711  }
2712  break;
2713  }
2714  ist->pts = ist->dts;
2715  ist->next_pts = ist->next_dts;
2716  }
2717  for (i = 0; i < nb_output_streams; i++) {
2718  OutputStream *ost = output_streams[i];
2719 
2720  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2721  continue;
2722 
2723  do_streamcopy(ist, ost, pkt);
2724  }
2725 
2726  return !eof_reached;
2727 }
2728 
/*
 * Print an SDP description covering all RTP output files, either to stdout
 * or to the file given with -sdp_file.  Bails out silently until every
 * output file's header has been written, so stream parameters are final.
 */
2729 static void print_sdp(void)
2730 {
2731  char sdp[16384];
2732  int i;
2733  int j;
2734  AVIOContext *sdp_pb;
2735  AVFormatContext **avc;
2736 
2737  for (i = 0; i < nb_output_files; i++) {
2738  if (!output_files[i]->header_written)
2739  return;
2740  }
2741 
 /* Collect only the RTP muxer contexts; j counts how many were found. */
2742  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2743  if (!avc)
2744  exit_program(1);
2745  for (i = 0, j = 0; i < nb_output_files; i++) {
2746  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2747  avc[j] = output_files[i]->ctx;
2748  j++;
2749  }
2750  }
2751 
2752  if (!j)
2753  goto fail;
2754 
2755  av_sdp_create(avc, j, sdp, sizeof(sdp));
2756 
2757  if (!sdp_filename) {
2758  printf("SDP:\n%s\n", sdp);
2759  fflush(stdout);
2760  } else {
2761  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2762  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2763  } else {
2764  avio_print(sdp_pb, sdp);
2765  avio_closep(&sdp_pb);
 /* NOTE(review): one cleanup line (presumably freeing sdp_filename)
  * is absent from this excerpt. */
2767  }
2768  }
2769 
2770 fail:
2771  av_freep(&avc);
2772 }
2773 
/*
 * AVCodecContext.get_format() callback.
 *
 * Walks the decoder's list of proposed pixel formats and returns the first
 * one for which the user-requested (or auto-selected) hwaccel can be
 * initialized; stops at the first software format otherwise.  Handles both
 * the generic hw-config path (HWACCEL_GENERIC/HWACCEL_AUTO via
 * avcodec_get_hw_config()) and the legacy hwaccels[] table.
 * NOTE(review): the signature line (AVCodecContext *s, const enum
 * AVPixelFormat *pix_fmts) and several av_log/descriptor lines are absent
 * from this excerpt.
 */
2775 {
2776  InputStream *ist = s->opaque;
2777  const enum AVPixelFormat *p;
2778  int ret;
2779 
2780  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 /* NOTE(review): the av_pix_fmt_desc_get(*p) line initializing desc is
  * absent from this excerpt. */
2782  const AVCodecHWConfig *config = NULL;
2783  int i;
2784 
 /* First software format in the list terminates the search. */
2785  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2786  break;
2787 
2788  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2789  ist->hwaccel_id == HWACCEL_AUTO) {
 /* Find a decoder hw config matching this pixel format. */
2790  for (i = 0;; i++) {
2791  config = avcodec_get_hw_config(s->codec, i);
2792  if (!config)
2793  break;
2794  if (!(config->methods &
 /* NOTE(review): the AV_CODEC_HW_CONFIG_METHOD_* mask line is
  * absent from this excerpt. */
2796  continue;
2797  if (config->pix_fmt == *p)
2798  break;
2799  }
2800  }
2801  if (config) {
2802  if (config->device_type != ist->hwaccel_device_type) {
2803  // Different hwaccel offered, ignore.
2804  continue;
2805  }
2806 
2807  ret = hwaccel_decode_init(s);
2808  if (ret < 0) {
 /* An explicitly requested hwaccel that fails is fatal; with AUTO we
  * just move on to the next candidate format. */
2809  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2811  "%s hwaccel requested for input stream #%d:%d, "
2812  "but cannot be initialized.\n",
2814  ist->file_index, ist->st->index);
2815  return AV_PIX_FMT_NONE;
2816  }
2817  continue;
2818  }
2819  } else {
 /* Legacy path: look the format up in the static hwaccels[] table. */
2820  const HWAccel *hwaccel = NULL;
2821  int i;
2822  for (i = 0; hwaccels[i].name; i++) {
2823  if (hwaccels[i].pix_fmt == *p) {
2824  hwaccel = &hwaccels[i];
2825  break;
2826  }
2827  }
2828  if (!hwaccel) {
2829  // No hwaccel supporting this pixfmt.
2830  continue;
2831  }
2832  if (hwaccel->id != ist->hwaccel_id) {
2833  // Does not match requested hwaccel.
2834  continue;
2835  }
2836 
2837  ret = hwaccel->init(s);
2838  if (ret < 0) {
2840  "%s hwaccel requested for input stream #%d:%d, "
2841  "but cannot be initialized.\n", hwaccel->name,
2842  ist->file_index, ist->st->index);
2843  return AV_PIX_FMT_NONE;
2844  }
2845  }
2846 
2847  if (ist->hw_frames_ctx) {
 /* NOTE(review): the av_buffer_ref(ist->hw_frames_ctx) assignment to
  * s->hw_frames_ctx appears to be absent from this excerpt. */
2849  if (!s->hw_frames_ctx)
2850  return AV_PIX_FMT_NONE;
2851  }
2852 
2853  ist->hwaccel_pix_fmt = *p;
2854  break;
2855  }
2856 
2857  return *p;
2858 }
2859 
/*
 * AVCodecContext.get_buffer2() callback: delegate allocation of hwaccel
 * frames to the hwaccel's own buffer hook, everything else to the default
 * allocator.
 * NOTE(review): the signature line (AVCodecContext *s, AVFrame *frame,
 * int flags) is absent from this excerpt.
 */
2861 {
2862  InputStream *ist = s->opaque;
2863 
2864  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2865  return ist->hwaccel_get_buffer(s, frame, flags);
2866 
2867  return avcodec_default_get_buffer2(s, frame, flags);
2868 }
2869 
/*
 * Open the decoder for one input stream, when decoding is needed: install
 * the get_format/get_buffer callbacks, apply per-stream decoder options,
 * and set up any hardware device.  On failure, a human-readable message is
 * written into error (error_len bytes) and a negative AVERROR is returned.
 * Always resets the stream's pts/dts predictions.
 */
2870 static int init_input_stream(int ist_index, char *error, int error_len)
2871 {
2872  int ret;
2873  InputStream *ist = input_streams[ist_index];
2874 
2875  if (ist->decoding_needed) {
2876  AVCodec *codec = ist->dec;
2877  if (!codec) {
2878  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2879  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2880  return AVERROR(EINVAL);
2881  }
2882 
 /* Hook up the hwaccel-aware callbacks; opaque carries the InputStream. */
2883  ist->dec_ctx->opaque = ist;
2884  ist->dec_ctx->get_format = get_format;
2885  ist->dec_ctx->get_buffer2 = get_buffer;
2886  ist->dec_ctx->thread_safe_callbacks = 1;
2887 
2888  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2889  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2890  (ist->decoding_needed & DECODING_FOR_OST)) {
2891  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): the condition line guarding this warning (filtering
  * in addition to output) is absent from this excerpt. */
2893  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2894  }
2895 
2896  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2897 
2898  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2899  * audio, and video decoders such as cuvid or mediacodec */
2900  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2901 
2902  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2903  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2904  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
 /* NOTE(review): the attached-pic condition line is absent from this
  * excerpt. */
2906  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2907 
2908  ret = hw_device_setup_for_decode(ist);
2909  if (ret < 0) {
2910  snprintf(error, error_len, "Device setup failed for "
2911  "decoder on input stream #%d:%d : %s",
2912  ist->file_index, ist->st->index, av_err2str(ret));
2913  return ret;
2914  }
2915 
2916  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2917  if (ret == AVERROR_EXPERIMENTAL)
2918  abort_codec_experimental(codec, 0);
2919 
2920  snprintf(error, error_len,
2921  "Error while opening decoder for input stream "
2922  "#%d:%d : %s",
2923  ist->file_index, ist->st->index, av_err2str(ret));
2924  return ret;
2925  }
 /* NOTE(review): the assert_avoptions(ist->decoder_opts) line appears to
  * be absent from this excerpt. */
2927  }
2928 
2929  ist->next_pts = AV_NOPTS_VALUE;
2930  ist->next_dts = AV_NOPTS_VALUE;
2931 
2932  return 0;
2933 }
2934 
/*
 * Return the single input stream feeding this output stream, or NULL when
 * the output has no direct source (e.g. it is fed by a complex
 * filtergraph, in which case source_index is negative).
 * NOTE(review): the signature line (parameter: OutputStream *ost) is
 * absent from this excerpt.
 */
2936 {
2937  if (ost->source_index >= 0)
2938  return input_streams[ost->source_index];
2939  return NULL;
2940 }
2941 
/* qsort() comparator ordering int64_t values ascending; written as a
 * sign-of-difference without subtraction, so it cannot overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2946 
2947 /* open the muxer when all the streams are initialized */
/*
 * If every stream of this output file is initialized, write the container
 * header, print the SDP when requested, and flush the packets that were
 * queued in each stream's muxing queue while the muxer was not yet open.
 * Returns 0 (including the "not ready yet" case) or a negative AVERROR.
 * NOTE(review): the signature line is absent from this excerpt; the
 * parameters are the OutputFile (of) and its index (file_index, used for
 * logging and av_dump_format).
 */
2949 {
2950  int ret, i;
2951 
2952  for (i = 0; i < of->ctx->nb_streams; i++) {
2953  OutputStream *ost = output_streams[of->ost_index + i];
2954  if (!ost->initialized)
2955  return 0;
2956  }
2957 
2958  of->ctx->interrupt_callback = int_cb;
2959 
2960  ret = avformat_write_header(of->ctx, &of->opts);
2961  if (ret < 0) {
 /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, ...) call opening this
  * message is absent from this excerpt. */
2963  "Could not write header for output file #%d "
2964  "(incorrect codec parameters ?): %s\n",
2965  file_index, av_err2str(ret));
2966  return ret;
2967  }
2968  //assert_avoptions(of->opts);
2969  of->header_written = 1;
2970 
2971  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2972 
2973  if (sdp_filename || want_sdp)
2974  print_sdp();
2975 
2976  /* flush the muxing queues */
2977  for (i = 0; i < of->ctx->nb_streams; i++) {
2978  OutputStream *ost = output_streams[of->ost_index + i];
2979 
2980  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2981  if (!av_fifo_size(ost->muxing_queue))
2982  ost->mux_timebase = ost->st->time_base;
2983 
2984  while (av_fifo_size(ost->muxing_queue)) {
2985  AVPacket pkt;
2986  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2987  write_packet(of, &pkt, ost, 1);
2988  }
2989  }
2990 
2991  return 0;
2992 }
2993 
/*
 * Initialize the output stream's bitstream filter, if one was requested:
 * feed it the stream's current codec parameters and time base, then copy
 * the (possibly modified) parameters and time base back to the stream.
 * No-op when the stream has no BSF.  Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line (parameter: OutputStream *ost) is
 * absent from this excerpt.
 */
2995 {
2996  AVBSFContext *ctx = ost->bsf_ctx;
2997  int ret;
2998 
2999  if (!ctx)
3000  return 0;
3001 
3002  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3003  if (ret < 0)
3004  return ret;
3005 
3006  ctx->time_base_in = ost->st->time_base;
3007 
3008  ret = av_bsf_init(ctx);
3009  if (ret < 0) {
3010  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3011  ctx->filter->name);
3012  return ret;
3013  }
3014 
 /* The BSF may rewrite extradata/parameters and the time base. */
3015  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3016  if (ret < 0)
3017  return ret;
3018  ost->st->time_base = ctx->time_base_out;
3019 
3020  return 0;
3021 }
3022 
/*
 * Set up an output stream for streamcopy (no re-encoding): round-trip the
 * input's codec parameters through the "encoder" context so -c:x copy
 * options still apply, pick a codec tag valid for the target container,
 * and carry over frame rate, disposition, side data, rotation override,
 * aspect ratio and estimated duration.  Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line (parameter: OutputStream *ost) and
 * several av_log / side-data / time-base lines are absent from this
 * excerpt.
 */
3024 {
3025  OutputFile *of = output_files[ost->file_index];
3026  InputStream *ist = get_input_stream(ost);
3027  AVCodecParameters *par_dst = ost->st->codecpar;
3028  AVCodecParameters *par_src = ost->ref_par;
3029  AVRational sar;
3030  int i, ret;
3031  uint32_t codec_tag = par_dst->codec_tag;
3032 
 /* Streamcopy implies a direct input stream and no filtergraph. */
3033  av_assert0(ist && !ost->filter);
3034 
3035  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3036  if (ret >= 0)
3037  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3038  if (ret < 0) {
 /* NOTE(review): the av_log(...) call opening this message is absent
  * from this excerpt. */
3040  "Error setting up codec context options.\n");
3041  return ret;
3042  }
3043 
3044  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3045  if (ret < 0) {
3047  "Error getting reference codec parameters.\n");
3048  return ret;
3049  }
3050 
 /* Keep the source codec tag only when the target container accepts it
  * (or defines no tag mapping at all). */
3051  if (!codec_tag) {
3052  unsigned int codec_tag_tmp;
3053  if (!of->ctx->oformat->codec_tag ||
3054  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3055  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3056  codec_tag = par_src->codec_tag;
3057  }
3058 
3059  ret = avcodec_parameters_copy(par_dst, par_src);
3060  if (ret < 0)
3061  return ret;
3062 
3063  par_dst->codec_tag = codec_tag;
3064 
3065  if (!ost->frame_rate.num)
3066  ost->frame_rate = ist->framerate;
3067  ost->st->avg_frame_rate = ost->frame_rate;
3068 
 /* NOTE(review): the avformat_transfer_internal_stream_timing_info(...)
  * call whose result is checked below is absent from this excerpt. */
3070  if (ret < 0)
3071  return ret;
3072 
3073  // copy timebase while removing common factors
3074  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* NOTE(review): the av_add_q(...) assignment line is absent from this
  * excerpt. */
3076 
3077  // copy estimated duration as a hint to the muxer
3078  if (ost->st->duration <= 0 && ist->st->duration > 0)
3079  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3080 
3081  // copy disposition
3082  ost->st->disposition = ist->st->disposition;
3083 
3084  if (ist->st->nb_side_data) {
3085  for (i = 0; i < ist->st->nb_side_data; i++) {
3086  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3087  uint8_t *dst_data;
3088 
3089  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3090  if (!dst_data)
3091  return AVERROR(ENOMEM);
3092  memcpy(dst_data, sd_src->data, sd_src->size);
3093  }
3094  }
3095 
 /* -metadata:s rotate override: rewrite the display matrix side data. */
3096  if (ost->rotate_overridden) {
 /* NOTE(review): the av_stream_new_side_data(...) line allocating sd
  * and the av_display_rotation_set(...) line are absent from this
  * excerpt. */
3098  sizeof(int32_t) * 9);
3099  if (sd)
3101  }
3102 
3103  switch (par_dst->codec_type) {
3104  case AVMEDIA_TYPE_AUDIO:
3105  if (audio_volume != 256) {
3106  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3107  exit_program(1);
3108  }
3109  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3110  par_dst->block_align= 0;
3111  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3112  par_dst->block_align= 0;
3113  break;
3114  case AVMEDIA_TYPE_VIDEO:
3115  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3116  sar =
 /* NOTE(review): the av_mul_q(ost->frame_aspect_ratio, ...) line is
  * absent from this excerpt. */
3118  (AVRational){ par_dst->height, par_dst->width });
3119  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3120  "with stream copy may produce invalid files\n");
3121  }
3122  else if (ist->st->sample_aspect_ratio.num)
3123  sar = ist->st->sample_aspect_ratio;
3124  else
3125  sar = par_src->sample_aspect_ratio;
3126  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3127  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3128  ost->st->r_frame_rate = ist->st->r_frame_rate;
3129  break;
3130  }
3131 
3132  ost->mux_timebase = ist->st->time_base;
3133 
3134  return 0;
3135 }
3136 
/*
 * Store an "encoder" metadata tag on the output stream, identifying
 * libavcodec plus the encoder name, unless the user set one already.
 * When bitexact output is requested (format- or codec-level flag, also
 * honoring -fflags/-flags given as option strings), the version string is
 * replaced by plain "Lavc" so output stays reproducible.
 * NOTE(review): the signature line (OutputFile *of, OutputStream *ost)
 * is absent from this excerpt.
 */
3138 {
3139  AVDictionaryEntry *e;
3140 
3141  uint8_t *encoder_string;
3142  int encoder_string_len;
3143  int format_flags = 0;
3144  int codec_flags = ost->enc_ctx->flags;
3145 
3146  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3147  return;
3148 
 /* Re-evaluate -fflags / -flags option strings: they may not have been
  * applied to the contexts yet at this point. */
3149  e = av_dict_get(of->opts, "fflags", NULL, 0);
3150  if (e) {
3151  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3152  if (!o)
3153  return;
3154  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3155  }
3156  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3157  if (e) {
3158  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3159  if (!o)
3160  return;
3161  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3162  }
3163 
 /* " " separator + NUL are covered by sizeof(LIBAVCODEC_IDENT) + 2. */
3164  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3165  encoder_string = av_mallocz(encoder_string_len);
3166  if (!encoder_string)
3167  exit_program(1);
3168 
3169  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3170  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3171  else
3172  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3173  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3174  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): the flags argument line (presumably
  * AV_DICT_DONT_STRDUP_VAL) is absent from this excerpt. */
3176 }
3177 
/*
 * Parse the -force_key_frames specification for one output stream: a
 * comma-separated list of times and/or "chapters[+offset]" entries.  Each
 * entry expands to one pts in the encoder time base ("chapters" expands to
 * one pts per chapter of the output file, shifted by the optional offset).
 * The resulting array is sorted ascending into ost->forced_kf_pts with
 * ost->forced_kf_count entries.  Exits the program on allocation failure.
 */
3178 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3179  AVCodecContext *avctx)
3180 {
3181  char *p;
3182  int n = 1, i, size, index = 0;
3183  int64_t t, *pts;
3184 
 /* One entry per comma-separated field. */
3185  for (p = kf; *p; p++)
3186  if (*p == ',')
3187  n++;
3188  size = n;
3189  pts = av_malloc_array(size, sizeof(*pts));
3190  if (!pts) {
3191  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3192  exit_program(1);
3193  }
3194 
 /* Tokenize in place: each ',' is overwritten with a NUL. */
3195  p = kf;
3196  for (i = 0; i < n; i++) {
3197  char *next = strchr(p, ',');
3198 
3199  if (next)
3200  *next++ = 0;
3201 
3202  if (!memcmp(p, "chapters", 8)) {
3203 
3204  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3205  int j;
3206 
 /* "chapters" expands to nb_chapters entries, replacing one slot. */
3207  if (avf->nb_chapters > INT_MAX - size ||
3208  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3209  sizeof(*pts)))) {
 /* NOTE(review): the av_log(NULL, AV_LOG_FATAL, ...) call opening
  * this message is absent from this excerpt. */
3211  "Could not allocate forced key frames array.\n");
3212  exit_program(1);
3213  }
 /* Optional "+offset" after the literal "chapters". */
3214  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3215  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3216 
3217  for (j = 0; j < avf->nb_chapters; j++) {
3218  AVChapter *c = avf->chapters[j];
3219  av_assert1(index < size);
3220  pts[index++] = av_rescale_q(c->start, c->time_base,
3221  avctx->time_base) + t;
3222  }
3223 
3224  } else {
3225 
3226  t = parse_time_or_die("force_key_frames", p, 1);
3227  av_assert1(index < size);
3228  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3229 
3230  }
3231 
3232  p = next;
3233  }
3234 
3235  av_assert0(index == size);
3236  qsort(pts, size, sizeof(*pts), compare_int64);
3237  ost->forced_kf_count = size;
3238  ost->forced_kf_pts = pts;
3239 }
3240 
3241 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3242 {
3243  InputStream *ist = get_input_stream(ost);
3244  AVCodecContext *enc_ctx = ost->enc_ctx;
3245  AVFormatContext *oc;
3246 
3247  if (ost->enc_timebase.num > 0) {
3248  enc_ctx->time_base = ost->enc_timebase;
3249  return;
3250  }
3251 
3252  if (ost->enc_timebase.num < 0) {
3253  if (ist) {
3254  enc_ctx->time_base = ist->st->time_base;
3255  return;
3256  }
3257 
3258  oc = output_files[ost->file_index]->ctx;
3259  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3260  }
3261 
3262  enc_ctx->time_base = default_time_base;
3263 }
3264 
/* init_output_stream_encode(): configure the encoder context of an output
 * stream before avcodec_open2() — frame rate selection, dimensions, pixel
 * format / sample parameters from the filtergraph sink, time bases, and
 * forced-keyframe setup. Returns 0 on success or a negative AVERROR.
 * NOTE(review): the signature line (doc line 3265) and the declaration of
 * dec_ctx (doc line 3269) are missing from this extract; the body reads
 * `ost` and a `dec_ctx` initialized from the input stream when present. */
3266 {
3267  InputStream *ist = get_input_stream(ost);
3268  AVCodecContext *enc_ctx = ost->enc_ctx;
3270  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3271  int j, ret;
3272 
3273  set_encoder_id(output_files[ost->file_index], ost);
3274 
3275  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3276  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3277  // which have to be filtered out to prevent leaking them to output files.
3278  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3279 
3280  if (ist) {
     /* Mirror input disposition and chroma location on the output. */
3281  ost->st->disposition = ist->st->disposition;
3282 
3283  dec_ctx = ist->dec_ctx;
3284 
3285  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3286  } else {
     /* No input stream: if this is the only stream of its type in the
      * file, give it a default disposition (continuation missing from
      * this extract, doc lines 3294-3295). */
3287  for (j = 0; j < oc->nb_streams; j++) {
3288  AVStream *st = oc->streams[j];
3289  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3290  break;
3291  }
3292  if (j == oc->nb_streams)
3293  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3296  }
3297 
     /* Frame rate: user option, then filter sink (doc line 3300 missing),
      * then input -r, then input r_frame_rate, then a 25fps default. */
3298  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3299  if (!ost->frame_rate.num)
3301  if (ist && !ost->frame_rate.num)
3302  ost->frame_rate = ist->framerate;
3303  if (ist && !ost->frame_rate.num)
3304  ost->frame_rate = ist->st->r_frame_rate;
3305  if (ist && !ost->frame_rate.num) {
3306  ost->frame_rate = (AVRational){25, 1};
3308  "No information "
3309  "about the input framerate is available. Falling "
3310  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3311  "if you want a different framerate.\n",
3312  ost->file_index, ost->index);
3313  }
3314 
     /* Snap to the nearest frame rate the encoder supports, unless the
      * user forced an exact rate with -force_fps. */
3315  if (ost->enc->supported_framerates && !ost->force_fps) {
3316  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3317  ost->frame_rate = ost->enc->supported_framerates[idx];
3318  }
3319  // reduce frame rate for mpeg4 to be within the spec limits
3320  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3321  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3322  ost->frame_rate.num, ost->frame_rate.den, 65535);
3323  }
3324  }
3325 
3326  switch (enc_ctx->codec_type) {
3327  case AVMEDIA_TYPE_AUDIO:
     /* Cap raw-sample depth at what the sample format can hold.
      * (Sample fmt/rate/layout setup, doc lines 3328 and 3332-3334,
      * is missing from this extract.) */
3329  if (dec_ctx)
3330  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3331  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3335 
3336  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3337  break;
3338 
3339  case AVMEDIA_TYPE_VIDEO:
3341 
     /* Warn when the timebase implies a very high frame rate that the
      * muxer handles poorly (condition continuation, doc lines 3343 and
      * 3345, is missing from this extract). */
3342  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3344  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3346  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3347  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3348  }
3349 
     /* Geometry and pixel format come from the filtergraph sink. */
3350  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3351  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3352  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3353  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3354  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3356 
3357  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3358  if (dec_ctx)
3359  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3360  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3361 
3362  enc_ctx->framerate = ost->frame_rate;
3363 
3364  ost->st->avg_frame_rate = ost->frame_rate;
3365 
     /* Body of this branch (doc line 3370) is missing from this extract;
      * the condition detects geometry/format changes versus the input. */
3366  if (!dec_ctx ||
3367  enc_ctx->width != dec_ctx->width ||
3368  enc_ctx->height != dec_ctx->height ||
3369  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3371  }
3372 
     /* -top 0/1 forces bottom/top field first; -1 leaves it unset. */
3373  if (ost->top_field_first == 0) {
3374  enc_ctx->field_order = AV_FIELD_BB;
3375  } else if (ost->top_field_first == 1) {
3376  enc_ctx->field_order = AV_FIELD_TT;
3377  }
3378 
3379  if (ost->forced_keyframes) {
     /* "expr:<e>" — keyframe placement driven by an expression (the
      * av_expr_parse call, doc lines 3381-3384, and the error log and
      * expression state init, doc lines 3388-3391, are missing). */
3380  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3383  if (ret < 0) {
3385  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3386  return ret;
3387  }
3392 
3393  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3394  // parse it only for static kf timings
3395  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3397  }
3398  }
3399  break;
3400  case AVMEDIA_TYPE_SUBTITLE:
3401  enc_ctx->time_base = AV_TIME_BASE_Q;
     /* Subtitle canvas defaults to the input video dimensions. */
3402  if (!enc_ctx->width) {
3403  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3404  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3405  }
3406  break;
3407  case AVMEDIA_TYPE_DATA:
3408  break;
3409  default:
3410  abort();
3411  break;
3412  }
3413 
3414  ost->mux_timebase = enc_ctx->time_base;
3415 
3416  return 0;
3417 }
3418 
/* init_output_stream(): fully initialize one output stream — run the
 * encoder setup path (init_output_stream_encode, hw device setup,
 * avcodec_open2, side-data propagation) or the streamcopy path, apply the
 * user's -disposition flags, set up bitstream filters, and finally try to
 * write the output file header. On failure a human-readable message is
 * written into `error` and a negative AVERROR is returned. */
3419 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3420 {
3421  int ret = 0;
3422 
3423  if (ost->encoding_needed) {
3424  AVCodec *codec = ost->enc;
3425  AVCodecContext *dec = NULL;
3426  InputStream *ist;
3427 
3428  ret = init_output_stream_encode(ost);
3429  if (ret < 0)
3430  return ret;
3431 
3432  if ((ist = get_input_stream(ost)))
3433  dec = ist->dec_ctx;
     /* Forward the decoder's ASS subtitle header to the encoder.
      * NOTE(review): the av_mallocz allocation (doc line 3436) and the
      * subtitle_header_size assignment (doc line 3440) are missing from
      * this extract. */
3434  if (dec && dec->subtitle_header) {
3435  /* ASS code assumes this buffer is null terminated so add extra byte. */
3437  if (!ost->enc_ctx->subtitle_header)
3438  return AVERROR(ENOMEM);
3439  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3441  }
     /* Sensible defaults: automatic threading, and 128k audio bitrate
      * when the encoder itself has no defaults and the user set none. */
3442  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3443  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3444  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3445  !codec->defaults &&
3446  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3447  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3448  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3449 
3450  ret = hw_device_setup_for_encode(ost);
3451  if (ret < 0) {
3452  snprintf(error, error_len, "Device setup failed for "
3453  "encoder on output stream #%d:%d : %s",
3454  ost->file_index, ost->index, av_err2str(ret));
3455  return ret;
3456  }
3457 
     /* Subtitle transcoding is only supported text->text or
      * bitmap->bitmap; reject mixed conversions up front.
      * (The avcodec_descriptor_get calls, doc lines 3461 and 3463,
      * are missing from this extract.) */
3458  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3459  int input_props = 0, output_props = 0;
3460  AVCodecDescriptor const *input_descriptor =
3462  AVCodecDescriptor const *output_descriptor =
3464  if (input_descriptor)
3465  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3466  if (output_descriptor)
3467  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3468  if (input_props && output_props && input_props != output_props) {
3469  snprintf(error, error_len,
3470  "Subtitle encoding currently only possible from text to text "
3471  "or bitmap to bitmap");
3472  return AVERROR_INVALIDDATA;
3473  }
3474  }
3475 
3476  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3477  if (ret == AVERROR_EXPERIMENTAL)
3478  abort_codec_experimental(codec, 1);
3479  snprintf(error, error_len,
3480  "Error while opening encoder for output stream #%d:%d - "
3481  "maybe incorrect parameters such as bit_rate, rate, width or height",
3482  ost->file_index, ost->index);
3483  return ret;
3484  }
     /* Frame-size setup for fixed-frame-size audio encoders (doc lines
      * 3487 and 3489 are missing from this extract). */
3485  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3486  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3488  ost->enc_ctx->frame_size);
3490  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3491  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3492  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3493  " It takes bits/s as argument, not kbits/s\n");
3494 
     /* Copy the opened encoder parameters to the stream (the call on
      * doc line 3495 and the av_log on 3497 are missing from this
      * extract). */
3496  if (ret < 0) {
3498  "Error initializing the output stream codec context.\n");
3499  exit_program(1);
3500  }
3501  /*
3502  * FIXME: ost->st->codec should't be needed here anymore.
3503  */
3504  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3505  if (ret < 0)
3506  return ret;
3507 
     /* Propagate encoder-produced global side data to the stream. */
3508  if (ost->enc_ctx->nb_coded_side_data) {
3509  int i;
3510 
3511  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3512  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3513  uint8_t *dst_data;
3514 
3515  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3516  if (!dst_data)
3517  return AVERROR(ENOMEM);
3518  memcpy(dst_data, sd_src->data, sd_src->size);
3519  }
3520  }
3521 
3522  /*
3523  * Add global input side data. For now this is naive, and copies it
3524  * from the input stream's global side data. All side data should
3525  * really be funneled over AVFrame and libavfilter, then added back to
3526  * packet side data, and then potentially using the first packet for
3527  * global side data.
3528  */
3529  if (ist) {
3530  int i;
3531  for (i = 0; i < ist->st->nb_side_data; i++) {
3532  AVPacketSideData *sd = &ist->st->side_data[i];
3533  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3534  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3535  if (!dst)
3536  return AVERROR(ENOMEM);
3537  memcpy(dst, sd->data, sd->size);
     /* Autorotation happens in the filtergraph, so neutralize any
      * display-matrix rotation carried over from the input. */
3538  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3539  av_display_rotation_set((uint32_t *)dst, 0);
3540  }
3541  }
3542  }
3543 
3544  // copy timebase while removing common factors
3545  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3546  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3547 
3548  // copy estimated duration as a hint to the muxer
3549  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3550  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3551 
3552  ost->st->codec->codec= ost->enc_ctx->codec;
3553  } else if (ost->stream_copy) {
3554  ret = init_output_stream_streamcopy(ost);
3555  if (ret < 0)
3556  return ret;
3557  }
3558 
3559  // parse user provided disposition, and update stream values
3560  if (ost->disposition) {
     /* A throwaway AVOption table so av_opt_eval_flags can parse the
      * textual -disposition value into AV_DISPOSITION_* bits. */
3561  static const AVOption opts[] = {
3562  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3563  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3564  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3565  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3566  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3567  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3568  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3569  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3570  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3571  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3572  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3573  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3574  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3575  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3576  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3577  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3578  { NULL },
3579  };
3580  static const AVClass class = {
3581  .class_name = "",
3582  .item_name = av_default_item_name,
3583  .option = opts,
3584  .version = LIBAVUTIL_VERSION_INT,
3585  };
3586  const AVClass *pclass = &class;
3587 
3588  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3589  if (ret < 0)
3590  return ret;
3591  }
3592 
3593  /* initialize bitstream filters for the output stream
3594  * needs to be done here, because the codec id for streamcopy is not
3595  * known until now */
3596  ret = init_output_bsfs(ost);
3597  if (ret < 0)
3598  return ret;
3599 
3600  ost->initialized = 1;
3601 
     /* Write the header once every stream of this file is initialized. */
3602  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3603  if (ret < 0)
3604  return ret;
3605 
3606  return ret;
3607 }
3608 
/* report_new_stream(): warn (once per stream index) when a packet arrives
 * for an input stream that appeared after the initial probe, so the user
 * knows it will not be mapped.
 * NOTE(review): the media-type argument of the av_log call (doc line 3618)
 * is missing from this extract. */
3609 static void report_new_stream(int input_index, AVPacket *pkt)
3610 {
3611  InputFile *file = input_files[input_index];
3612  AVStream *st = file->ctx->streams[pkt->stream_index];
3613 
     /* Already warned about this (or a later) stream index: stay quiet. */
3614  if (pkt->stream_index < file->nb_streams_warn)
3615  return;
3616  av_log(file->ctx, AV_LOG_WARNING,
3617  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3619  input_index, pkt->stream_index,
3620  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* Raise the high-water mark so the warning fires at most once. */
3621  file->nb_streams_warn = pkt->stream_index + 1;
3622 }
3623 
/* transcode_init(): one-time setup before the main transcode loop —
 * resolve filtergraph-fed output sources, start frame-rate emulation
 * clocks, open decoders and (non-filtered) encoders, discard unused
 * programs, write headers for stream-less outputs, and print the stream
 * mapping. Returns 0 on success or a negative AVERROR. */
3624 static int transcode_init(void)
3625 {
3626  int ret = 0, i, j, k;
3627  AVFormatContext *oc;
3628  OutputStream *ost;
3629  InputStream *ist;
3630  char error[1024] = {0};
3631 
     /* For outputs fed by a single-input filtergraph, record which input
      * stream ultimately feeds them (used for mapping/reporting). */
3632  for (i = 0; i < nb_filtergraphs; i++) {
3633  FilterGraph *fg = filtergraphs[i];
3634  for (j = 0; j < fg->nb_outputs; j++) {
3635  OutputFilter *ofilter = fg->outputs[j];
3636  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3637  continue;
3638  if (fg->nb_inputs != 1)
3639  continue;
3640  for (k = nb_input_streams-1; k >= 0 ; k--)
3641  if (fg->inputs[0]->ist == input_streams[k])
3642  break;
3643  ofilter->ost->source_index = k;
3644  }
3645  }
3646 
3647  /* init framerate emulation */
3648  for (i = 0; i < nb_input_files; i++) {
3649  InputFile *ifile = input_files[i];
3650  if (ifile->rate_emu)
3651  for (j = 0; j < ifile->nb_streams; j++)
3652  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3653  }
3654 
3655  /* init input streams */
3656  for (i = 0; i < nb_input_streams; i++)
3657  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
     /* On failure, close any encoders before reporting. */
3658  for (i = 0; i < nb_output_streams; i++) {
3659  ost = output_streams[i];
3660  avcodec_close(ost->enc_ctx);
3661  }
3662  goto dump_format;
3663  }
3664 
3665  /* open each encoder */
3666  for (i = 0; i < nb_output_streams; i++) {
3667  // skip streams fed from filtergraphs until we have a frame for them
3668  if (output_streams[i]->filter)
3669  continue;
3670 
3671  ret = init_output_stream(output_streams[i], error, sizeof(error));
3672  if (ret < 0)
3673  goto dump_format;
3674  }
3675 
3676  /* discard unused programs */
3677  for (i = 0; i < nb_input_files; i++) {
3678  InputFile *ifile = input_files[i];
3679  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3680  AVProgram *p = ifile->ctx->programs[j];
3681  int discard = AVDISCARD_ALL;
3682 
     /* Keep a program if at least one of its streams is in use. */
3683  for (k = 0; k < p->nb_stream_indexes; k++)
3684  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3685  discard = AVDISCARD_DEFAULT;
3686  break;
3687  }
3688  p->discard = discard;
3689  }
3690  }
3691 
3692  /* write headers for files with no streams */
3693  for (i = 0; i < nb_output_files; i++) {
3694  oc = output_files[i]->ctx;
3695  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3696  ret = check_init_output_file(output_files[i], i);
3697  if (ret < 0)
3698  goto dump_format;
3699  }
3700  }
3701 
3702  dump_format:
3703  /* dump the stream mapping */
3704  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3705  for (i = 0; i < nb_input_streams; i++) {
3706  ist = input_streams[i];
3707 
     /* Inputs feeding complex filtergraphs: show stream -> graph edges. */
3708  for (j = 0; j < ist->nb_filters; j++) {
3709  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3710  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3711  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3712  ist->filters[j]->name);
3713  if (nb_filtergraphs > 1)
3714  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3715  av_log(NULL, AV_LOG_INFO, "\n");
3716  }
3717  }
3718  }
3719 
3720  for (i = 0; i < nb_output_streams; i++) {
3721  ost = output_streams[i];
3722 
3723  if (ost->attachment_filename) {
3724  /* an attached file */
3725  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3726  ost->attachment_filename, ost->file_index, ost->index);
3727  continue;
3728  }
3729 
3730  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3731  /* output from a complex graph */
3732  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3733  if (nb_filtergraphs > 1)
3734  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3735 
3736  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3737  ost->index, ost->enc ? ost->enc->name : "?");
3738  continue;
3739  }
3740 
3741  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3742  input_streams[ost->source_index]->file_index,
3743  input_streams[ost->source_index]->st->index,
3744  ost->file_index,
3745  ost->index);
3746  if (ost->sync_ist != input_streams[ost->source_index])
3747  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3748  ost->sync_ist->file_index,
3749  ost->sync_ist->st->index);
3750  if (ost->stream_copy)
3751  av_log(NULL, AV_LOG_INFO, " (copy)");
3752  else {
     /* Print "(codec (decoder) -> codec (encoder))"; "native" marks
      * the default implementation for that codec id. */
3753  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3754  const AVCodec *out_codec = ost->enc;
3755  const char *decoder_name = "?";
3756  const char *in_codec_name = "?";
3757  const char *encoder_name = "?";
3758  const char *out_codec_name = "?";
3759  const AVCodecDescriptor *desc;
3760 
3761  if (in_codec) {
3762  decoder_name = in_codec->name;
3763  desc = avcodec_descriptor_get(in_codec->id);
3764  if (desc)
3765  in_codec_name = desc->name;
3766  if (!strcmp(decoder_name, in_codec_name))
3767  decoder_name = "native";
3768  }
3769 
3770  if (out_codec) {
3771  encoder_name = out_codec->name;
3772  desc = avcodec_descriptor_get(out_codec->id);
3773  if (desc)
3774  out_codec_name = desc->name;
3775  if (!strcmp(encoder_name, out_codec_name))
3776  encoder_name = "native";
3777  }
3778 
3779  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3780  in_codec_name, decoder_name,
3781  out_codec_name, encoder_name);
3782  }
3783  av_log(NULL, AV_LOG_INFO, "\n");
3784  }
3785 
3786  if (ret) {
3787  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3788  return ret;
3789  }
3790 
     /* NOTE(review): doc line 3791 is missing from this extract;
      * presumably it marks initialization as done — confirm upstream. */
3792 
3793  return 0;
3794 }
3795 
3796 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3797 static int need_output(void)
3798 {
3799  int i;
3800 
3801  for (i = 0; i < nb_output_streams; i++) {
3802  OutputStream *ost = output_streams[i];
3803  OutputFile *of = output_files[ost->file_index];
3804  AVFormatContext *os = output_files[ost->file_index]->ctx;
3805 
3806  if (ost->finished ||
3807  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3808  continue;
3809  if (ost->frame_number >= ost->max_frames) {
3810  int j;
3811  for (j = 0; j < of->ctx->nb_streams; j++)
3812  close_output_stream(output_streams[of->ost_index + j]);
3813  continue;
3814  }
3815 
3816  return 1;
3817  }
3818 
3819  return 0;
3820 }
3821 
3822 /**
3823  * Select the output stream to process.
3824  *
3825  * @return selected output stream, or NULL if none available
3826  */
/* Picks the not-yet-initialized stream first; otherwise the unfinished
 * stream with the smallest DTS (rescaled to AV_TIME_BASE_Q), i.e. the one
 * lagging furthest behind, to keep streams interleaved.
 * NOTE(review): the signature line (doc line 3827) and the av_log opening
 * (doc line 3839) are missing from this extract. */
3828 {
3829  int i;
3830  int64_t opts_min = INT64_MAX;
3831  OutputStream *ost_min = NULL;
3832 
3833  for (i = 0; i < nb_output_streams; i++) {
3834  OutputStream *ost = output_streams[i];
     /* Streams with no DTS yet sort first (INT64_MIN). */
3835  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3836  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3837  AV_TIME_BASE_Q);
3838  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3840  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3841  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3842 
     /* Uninitialized streams take absolute priority. */
3843  if (!ost->initialized && !ost->inputs_done)
3844  return ost;
3845 
     /* A temporarily unavailable best candidate yields NULL (EAGAIN). */
3846  if (!ost->finished && opts < opts_min) {
3847  opts_min = opts;
3848  ost_min = ost->unavailable ? NULL : ost;
3849  }
3850  }
3851  return ost_min;
3852 }
3853 
/* Enable (on != 0) or disable terminal echo on stdin, used around
 * interactive command entry. A no-op on systems without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3865 
/* check_keyboard_interaction(): poll the keyboard (at most every 100ms)
 * and act on interactive keys — q(uit), +/- verbosity, s (QP histogram),
 * h (hex/packet dump), c/C (filter commands), d/D (debug flags), ? (help).
 * Returns AVERROR_EXIT when the user asked to quit, 0 otherwise. */
3866 static int check_keyboard_interaction(int64_t cur_time)
3867 {
3868  int i, ret, key;
3869  static int64_t last_time;
3870  if (received_nb_signals)
3871  return AVERROR_EXIT;
3872  /* read_key() returns 0 on EOF */
3873  if(cur_time - last_time >= 100000 && !run_as_daemon){
3874  key = read_key();
3875  last_time = cur_time;
3876  }else
3877  key = -1;
3878  if (key == 'q')
3879  return AVERROR_EXIT;
3880  if (key == '+') av_log_set_level(av_log_get_level()+10);
3881  if (key == '-') av_log_set_level(av_log_get_level()-10);
3882  if (key == 's') qp_hist ^= 1;
     /* 'h' cycles: off -> packet dump -> packet+hex dump -> off.
      * NOTE(review): doc line 3890 is missing from this extract. */
3883  if (key == 'h'){
3884  if (do_hex_dump){
3885  do_hex_dump = do_pkt_dump = 0;
3886  } else if(do_pkt_dump){
3887  do_hex_dump = 1;
3888  } else
3889  do_pkt_dump = 1;
3891  }
     /* 'c'/'C': read a "<target> <time> <command> [arg]" line and send or
      * queue it to the filtergraphs. lower-case 'c' targets only the first
      * matching filter. */
3892  if (key == 'c' || key == 'C'){
3893  char buf[4096], target[64], command[256], arg[256] = {0};
3894  double time;
3895  int k, n = 0;
3896  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3897  i = 0;
3898  set_tty_echo(1);
3899  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3900  if (k > 0)
3901  buf[i++] = k;
3902  buf[i] = 0;
3903  set_tty_echo(0);
3904  fprintf(stderr, "\n");
3905  if (k > 0 &&
3906  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3907  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3908  target, time, command, arg);
3909  for (i = 0; i < nb_filtergraphs; i++) {
3910  FilterGraph *fg = filtergraphs[i];
3911  if (fg->graph) {
3912  if (time < 0) {
3913  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3914  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3915  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3916  } else if (key == 'c') {
3917  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3918  ret = AVERROR_PATCHWELCOME;
3919  } else {
3920  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3921  if (ret < 0)
3922  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3923  }
3924  }
3925  }
3926  } else {
     /* NOTE(review): the av_log opening this error message (doc line
      * 3927) is missing from this extract. */
3928  "Parse error, at least 3 arguments were expected, "
3929  "only %d given in string '%s'\n", n, buf);
3930  }
3931  }
     /* 'D' cycles through debug bit flags, skipping values that would
      * crash; 'd' prompts for a numeric debug value. The value is applied
      * to all input decoders and output encoders. */
3932  if (key == 'd' || key == 'D'){
3933  int debug=0;
3934  if(key == 'D') {
3935  debug = input_streams[0]->st->codec->debug<<1;
3936  if(!debug) debug = 1;
3937  while(debug & (FF_DEBUG_DCT_COEFF
3938 #if FF_API_DEBUG_MV
3939  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3940 #endif
3941  )) //unsupported, would just crash
3942  debug += debug;
3943  }else{
3944  char buf[32];
3945  int k = 0;
3946  i = 0;
3947  set_tty_echo(1);
3948  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3949  if (k > 0)
3950  buf[i++] = k;
3951  buf[i] = 0;
3952  set_tty_echo(0);
3953  fprintf(stderr, "\n");
3954  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3955  fprintf(stderr,"error parsing debug value\n");
3956  }
3957  for(i=0;i<nb_input_streams;i++) {
3958  input_streams[i]->st->codec->debug = debug;
3959  }
3960  for(i=0;i<nb_output_streams;i++) {
3961  OutputStream *ost = output_streams[i];
3962  ost->enc_ctx->debug = debug;
3963  }
3964  if(debug) av_log_set_level(AV_LOG_DEBUG);
3965  fprintf(stderr,"debug=%d\n", debug);
3966  }
3967  if (key == '?'){
3968  fprintf(stderr, "key function\n"
3969  "? show this help\n"
3970  "+ increase verbosity\n"
3971  "- decrease verbosity\n"
3972  "c Send command to first matching filter supporting it\n"
3973  "C Send/Queue command to all matching filters\n"
3974  "D cycle through available debug modes\n"
3975  "h dump packets/hex press to cycle through the 3 states\n"
3976  "q quit\n"
3977  "s Show QP histogram\n"
3978  );
3979  }
3980  return 0;
3981 }
3982 
3983 #if HAVE_THREADS
/* input_thread(): per-input-file reader thread. Reads packets from the
 * demuxer and pushes them into the file's thread message queue until EOF
 * or error; the error is propagated to the receiving side via
 * av_thread_message_queue_set_err_recv(). */
3984 static void *input_thread(void *arg)
3985 {
3986  InputFile *f = arg;
3987  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3988  int ret = 0;
3989 
3990  while (1) {
3991  AVPacket pkt;
3992  ret = av_read_frame(f->ctx, &pkt);
3993 
     /* Demuxer temporarily starved: back off and retry. */
3994  if (ret == AVERROR(EAGAIN)) {
3995  av_usleep(10000);
3996  continue;
3997  }
3998  if (ret < 0) {
3999  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4000  break;
4001  }
4002  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
     /* Queue full in non-blocking mode: fall back to a blocking send
      * for the rest of the run and warn the user once.
      * NOTE(review): the av_log call opening this warning (doc line
      * 4006) is missing from this extract. */
4003  if (flags && ret == AVERROR(EAGAIN)) {
4004  flags = 0;
4005  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4007  "Thread message queue blocking; consider raising the "
4008  "thread_queue_size option (current value: %d)\n",
4009  f->thread_queue_size);
4010  }
4011  if (ret < 0) {
4012  if (ret != AVERROR_EOF)
4013  av_log(f->ctx, AV_LOG_ERROR,
4014  "Unable to send packet to main thread: %s\n",
4015  av_err2str(ret));
     /* Packet was not consumed by the queue: release it here. */
4016  av_packet_unref(&pkt);
4017  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4018  break;
4019  }
4020  }
4021 
4022  return NULL;
4023 }
4024 
/* free_input_thread(): shut down the reader thread of input file `i` —
 * drain and unref any queued packets, join the thread, and free the queue.
 * No-op when the file has no thread queue.
 * NOTE(review): doc line 4032 is missing from this extract; presumably it
 * signals the sender side to stop before draining — confirm upstream. */
4025 static void free_input_thread(int i)
4026 {
4027  InputFile *f = input_files[i];
4028  AVPacket pkt;
4029 
4030  if (!f || !f->in_thread_queue)
4031  return;
     /* Drain leftover packets so the thread can exit its send call. */
4033  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4034  av_packet_unref(&pkt);
4035 
4036  pthread_join(f->thread, NULL);
4037  f->joined = 1;
4038  av_thread_message_queue_free(&f->in_thread_queue);
4039 }
4040 
4041 static void free_input_threads(void)
4042 {
4043  int i;
4044 
4045  for (i = 0; i < nb_input_files; i++)
4046  free_input_thread(i);
4047 }
4048 
4049 static int init_input_thread(int i)
4050 {
4051  int ret;
4052  InputFile *f = input_files[i];
4053 
4054  if (nb_input_files == 1)
4055  return 0;
4056 
4057  if (f->ctx->pb ? !f->ctx->pb->seekable :
4058  strcmp(f->ctx->iformat->name, "lavfi"))
4059  f->non_blocking = 1;
4060  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4061  f->thread_queue_size, sizeof(AVPacket));
4062  if (ret < 0)
4063  return ret;
4064 
4065  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4066  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4067  av_thread_message_queue_free(&f->in_thread_queue);
4068  return AVERROR(ret);
4069  }
4070 
4071  return 0;
4072 }
4073 
4074 static int init_input_threads(void)
4075 {
4076  int i, ret;
4077 
4078  for (i = 0; i < nb_input_files; i++) {
4079  ret = init_input_thread(i);
4080  if (ret < 0)
4081  return ret;
4082  }
4083  return 0;
4084 }
4085 
/* get_input_packet_mt(): receive one packet from the input file's reader
 * thread queue, non-blocking when the file was flagged non_blocking.
 * NOTE(review): the receive-flags line (doc line 4090, presumably
 * AV_THREAD_MESSAGE_NONBLOCK vs 0) is missing from this extract. */
4086 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4087 {
4088  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4089  f->non_blocking ?
4091 }
4092 #endif
4093 
/* get_input_packet(): fetch the next packet of input file `f` into `pkt`.
 * Honors -re rate emulation (returns EAGAIN while ahead of wall clock) and
 * routes through the per-file reader thread when multiple inputs exist.
 * NOTE(review): the signature line (doc line 4094) is missing from this
 * extract; the body reads `f` and `pkt`. */
4095 {
4096  if (f->rate_emu) {
4097  int i;
4098  for (i = 0; i < f->nb_streams; i++) {
4099  InputStream *ist = input_streams[f->ist_index + i];
     /* Compare the stream DTS (in microseconds) against the elapsed
      * wall-clock time since the emulation clock started. */
4100  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4101  int64_t now = av_gettime_relative() - ist->start;
4102  if (pts > now)
4103  return AVERROR(EAGAIN);
4104  }
4105  }
4106 
4107 #if HAVE_THREADS
4108  if (nb_input_files > 1)
4109  return get_input_packet_mt(f, pkt);
4110 #endif
4111  return av_read_frame(f->ctx, pkt);
4112 }
4113 
4114 static int got_eagain(void)
4115 {
4116  int i;
4117  for (i = 0; i < nb_output_streams; i++)
4118  if (output_streams[i]->unavailable)
4119  return 1;
4120  return 0;
4121 }
4122 
4123 static void reset_eagain(void)
4124 {
4125  int i;
4126  for (i = 0; i < nb_input_files; i++)
4127  input_files[i]->eagain = 0;
4128  for (i = 0; i < nb_output_streams; i++)
4129  output_streams[i]->unavailable = 0;
4130 }
4131 
4132 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4133 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4134  AVRational time_base)
4135 {
4136  int ret;
4137 
4138  if (!*duration) {
4139  *duration = tmp;
4140  return tmp_time_base;
4141  }
4142 
4143  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4144  if (ret < 0) {
4145  *duration = tmp;
4146  return tmp_time_base;
4147  }
4148 
4149  return time_base;
4150 }
4151 
/* seek_to_start(): rewind an input file for -stream_loop. Seeks back to
 * the start time and computes the file's total duration (including an
 * estimate of the last frame's length) so timestamps of the next loop
 * iteration can be offset correctly. Decrements the remaining loop count.
 * NOTE(review): the signature line (doc line 4152) is missing from this
 * extract; the body reads `ifile` (InputFile) and `is` (AVFormatContext). */
4153 {
4154  InputStream *ist;
4155  AVCodecContext *avctx;
4156  int i, ret, has_audio = 0;
4157  int64_t duration = 0;
4158 
4159  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4160  if (ret < 0)
4161  return ret;
4162 
     /* First pass: detect whether any audio stream produced samples. */
4163  for (i = 0; i < ifile->nb_streams; i++) {
4164  ist = input_streams[ifile->ist_index + i];
4165  avctx = ist->dec_ctx;
4166 
4167  /* duration is the length of the last frame in a stream
4168  * when audio stream is present we don't care about
4169  * last video frame length because it's not defined exactly */
4170  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4171  has_audio = 1;
4172  }
4173 
     /* Second pass: per stream, estimate last-frame duration and fold the
      * stream's total span into the file duration via duration_max(). */
4174  for (i = 0; i < ifile->nb_streams; i++) {
4175  ist = input_streams[ifile->ist_index + i];
4176  avctx = ist->dec_ctx;
4177 
4178  if (has_audio) {
4179  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4180  AVRational sample_rate = {1, avctx->sample_rate};
4181 
4182  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4183  } else {
4184  continue;
4185  }
4186  } else {
     /* Video-only file: last frame length from -r, then from the
      * average frame rate, else a 1-tick fallback. */
4187  if (ist->framerate.num) {
4188  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4189  } else if (ist->st->avg_frame_rate.num) {
4190  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4191  } else {
4192  duration = 1;
4193  }
4194  }
4195  if (!ifile->duration)
4196  ifile->time_base = ist->st->time_base;
4197  /* the total duration of the stream, max_pts - min_pts is
4198  * the duration of the stream without the last frame */
4199  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4200  duration += ist->max_pts - ist->min_pts;
4201  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4202  ifile->time_base);
4203  }
4204 
     /* Negative loop counts mean "loop forever" and are left untouched. */
4205  if (ifile->loop > 0)
4206  ifile->loop--;
4207 
4208  return ret;
4209 }
4210 
/*
 * Read one packet from input file @file_index, fix up its timestamps
 * (wrap correction, -itsoffset/-itsscale, loop-duration offset,
 * discontinuity handling) and pass it on via process_input_packet().
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 *
 * NOTE(review): several source lines inside this function were lost when
 * this chunk was extracted (each gap is flagged below) — compare against
 * the upstream file before relying on the exact text here.
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    /* NOTE(review): the declaration "AVFormatContext *is;" appears to be
     * missing here (dropped line) — 'is' is assigned and used below. */
    InputStream *ist;
    AVPacket pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    /* with -copyts the generic discontinuity correction is disabled by
     * default; the copy_ts wrap check below may re-enable it */
    int disable_discontinuity_correction = copy_ts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* EOF/error while -stream_loop is active: flush the decoders, rewind
     * the file and try reading again */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        /* real end of file: flush each decoder that needs it */
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): the av_pkt_dump_log2(...) call head appears to be
         * missing here (dropped line); the argument below belongs to it. */
        is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* per-stream statistics for the final report */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
        /* NOTE(review): the av_log(...) call head appears to be missing
         * here (dropped line); the format arguments below belong to it. */
        "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
            "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
            /* NOTE(review): several argument lines appear to be missing
             * here (dropped lines). */
            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
            av_ts2str(input_files[ist->file_index]->ts_offset),
            av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* undo a timestamp wrap that occurred relative to start_time; leave
         * wrap_correction_done unset so the other timestamp can still be
         * corrected on a later packet */
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't clobber side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the per-file timestamp offset, then the per-stream -itsscale */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* NOTE(review): the line computing pkt_dts (rescaling pkt.dts into
     * AV_TIME_BASE_Q) appears to be missing here (dropped line). */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        /* NOTE(review): the audio half of this condition appears to be
         * missing here (dropped line). */
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        /* inter-stream discontinuity: first packet of a stream jumps far
         * from the last timestamp seen in the whole file */
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            /* NOTE(review): the av_log(...) call head appears to be missing
             * here (dropped line). */
            "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
            delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* offset timestamps of looped input by the accumulated file duration */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* NOTE(review): the line recomputing pkt_dts after the duration offset
     * appears to be missing here (dropped line). */

    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        /* under -copyts, if adding one wrap period brings the dts much
         * closer to the expected dts, assume a wrap happened and allow the
         * discontinuity correction below to fix it */
        int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
        /* NOTE(review): the rounding-flags argument line appears to be
         * missing here (dropped line). */
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        /* NOTE(review): the audio half of this condition appears to be
         * missing here (dropped line). */
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        /* per-stream discontinuity: dts jumps away from the predicted dts */
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                /* NOTE(review): the av_log(...) call head appears to be
                 * missing here (dropped line). */
                "timestamp discontinuity for stream #%d:%d "
                "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                ist->file_index, ist->st->index, ist->st->id,
                /* NOTE(review): a media-type-string argument line appears
                 * to be missing here (dropped line). */
                delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* non-discontinuous formats: drop wildly invalid timestamps
             * instead of adjusting the offset */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* remember the last dts seen in this file, for the inter-stream
     * discontinuity check above */
    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
            /* NOTE(review): an argument line appears to be missing here
             * (dropped line). */
            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
            av_ts2str(input_files[ist->file_index]->ts_offset),
            av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* keep sub2video streams alive even without subtitle packets */
    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4498 
4499 /**
4500  * Perform a step of transcoding for the specified filter graph.
4501  *
4502  * @param[in] graph filter graph to consider
4503  * @param[out] best_ist input stream where a frame would allow to continue
4504  * @return 0 for success, <0 for error
4505  */
4506 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4507 {
4508  int i, ret;
4509  int nb_requests, nb_requests_max = 0;
4510  InputFilter *ifilter;
4511  InputStream *ist;
4512 
4513  *best_ist = NULL;
4514  ret = avfilter_graph_request_oldest(graph->graph);
4515  if (ret >= 0)
4516  return reap_filters(0);
4517 
4518  if (ret == AVERROR_EOF) {
4519  ret = reap_filters(1);
4520  for (i = 0; i < graph->nb_outputs; i++)
4521  close_output_stream(graph->outputs[i]->ost);
4522  return ret;
4523  }
4524  if (ret != AVERROR(EAGAIN))
4525  return ret;
4526 
4527  for (i = 0; i < graph->nb_inputs; i++) {
4528  ifilter = graph->inputs[i];
4529  ist = ifilter->ist;
4530  if (input_files[ist->file_index]->eagain ||
4531  input_files[ist->file_index]->eof_reached)
4532  continue;
4533  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4534  if (nb_requests > nb_requests_max) {
4535  nb_requests_max = nb_requests;
4536  *best_ist = ist;
4537  }
4538  }
4539 
4540  if (!*best_ist)
4541  for (i = 0; i < graph->nb_outputs; i++)
4542  graph->outputs[i]->ost->unavailable = 1;
4543 
4544  return 0;
4545 }
4546 
4547 /**
4548  * Run a single step of transcoding.
4549  *
4550  * @return 0 for success, <0 for error
4551  */
4552 static int transcode_step(void)
4553 {
4554  OutputStream *ost;
4555  InputStream *ist = NULL;
4556  int ret;
4557 
4558  ost = choose_output();
4559  if (!ost) {
4560  if (got_eagain()) {
4561  reset_eagain();
4562  av_usleep(10000);
4563  return 0;
4564  }
4565  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4566  return AVERROR_EOF;
4567  }
4568 
4569  if (ost->filter && !ost->filter->graph->graph) {
4571  ret = configure_filtergraph(ost->filter->graph);