FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity used by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination for per-frame video statistics (-vstats); NULL until opened. */
112 static FILE *vstats_file;
113 
/* Variable names available inside the -force_key_frames expression. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Wall-clock / user / system time triple used by -benchmark reporting.
 * NOTE(review): the closing "} BenchmarkTimeStamps;" (original line 127)
 * is elided in this extraction — confirm against the upstream source. */
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
/* Forward declarations; definitions appear later in the file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Run-time counters and flags shared across the transcoding loop. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
/* NOTE(review): several declarations (original lines 142-159) are elided
 * in this extraction; the gaps in the embedded numbering mark them. */
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* Saved terminal state so term_exit_sigsafe() can restore the tty that
163    term_init() reconfigured for raw key grabbing. */
163b static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video frames with an alpha channel so they can be
173  inserted into filter graphs. This is a temporary solution until
174  libavfilter gets real subtitle support. */
175 
/* Reset ist->sub2video.frame to a freshly allocated, fully zeroed canvas.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the function head (original line 176) is elided here —
 * presumably `static int sub2video_get_blank_frame(InputStream *ist)`;
 * line 184 (between the height assignment and the buffer allocation,
 * presumably the pixel-format assignment) is also elided. Confirm both
 * against the upstream source. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
     /* fall back to the pre-configured sub2video size when the decoder
        does not report dimensions */
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
186  return ret;
     /* zero plane 0 of the canvas; only the first plane is cleared here */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video frame (stamped with pts, in
 * ist->st->time_base units) into every filtergraph input fed by this
 * stream, and remember the pts in sub2video.last_pts.
 * NOTE(review): the flag arguments of av_buffersrc_add_frame_flags()
 * (original lines 232-233) are elided in this extraction. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
     /* EOF from a filter input is not an error here; anything else is
        only worth a warning, the heartbeat will retry */
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Rasterize a decoded subtitle (or a blank refresh when sub == NULL)
 * into the sub2video canvas and push it into the filtergraphs.
 * heartbeat_pts is used as the start time when initializing without a
 * subtitle available; otherwise the previous subtitle's end time is the
 * fall-back. NOTE(review): the av_log() call head (original line 267)
 * before the "Impossible to get a blank canvas." string is elided in
 * this extraction. */
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
     /* convert the subtitle's display window to stream time base units */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  /* If we are initializing the system, utilize current heartbeat
258  PTS as the start time, and show until the following subpicture
259  is received. Otherwise, utilize the previous subpicture's end time
260  as the fall-back value. */
261  pts = ist->sub2video.initialize ?
262  heartbeat_pts : ist->sub2video.end_pts;
263  end_pts = INT64_MAX;
264  num_rects = 0;
265  }
266  if (sub2video_get_blank_frame(ist) < 0) {
268  "Impossible to get a blank canvas.\n");
269  return;
270  }
271  dst = frame->data [0];
272  dst_linesize = frame->linesize[0];
273  for (i = 0; i < num_rects; i++)
274  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275  sub2video_push_ref(ist, pts);
276  ist->sub2video.end_pts = end_pts;
277  ist->sub2video.initialize = 0;
278 }
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Async-signal-safe part of terminal cleanup: restore the tty attributes
 * that term_init() saved, if it modified them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
335 
/* Public terminal cleanup, called on normal exit paths. Logs an empty
 * string at AV_LOG_QUIET level before restoring the terminal.
 * NOTE(review): original line 339 is elided in this extraction —
 * presumably the call to term_exit_sigsafe(); confirm upstream. */
336 void term_exit(void)
337 {
338  av_log(NULL, AV_LOG_QUIET, "%s", "");
340 }
341 
/* Signal/termination bookkeeping. These are volatile because they are
 * written from the signal handler and read by the main loop.
 * NOTE(review): original lines 344 and 347 are elided in this
 * extraction (additional state declarations); confirm upstream. */
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
348 
/* Signal handler for SIGINT/SIGTERM/SIGQUIT/SIGXCPU: records the signal
 * number and hard-exits with status 123 once more than three signals
 * have been received. NOTE(review): the name/parameter line (original
 * line 350, presumably `sigterm_handler(int sig)`) and lines 354-355
 * (presumably the signal counter increment and the sig-safe terminal
 * restore) are elided in this extraction — confirm upstream. */
349 static void
351 {
352  int ret;
353  received_sigterm = sig;
356  if(received_nb_signals > 3) {
     /* write(2) is async-signal-safe, unlike stdio */
357  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
358  strlen("Received > 3 system signals, hard exiting\n"));
359  if (ret < 0) { /* Do nothing */ };
360  exit(123);
361  }
362 }
363 
364 #if HAVE_SETCONSOLECTRLHANDLER
365 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366 {
367  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
368 
369  switch (fdwCtrlType)
370  {
371  case CTRL_C_EVENT:
372  case CTRL_BREAK_EVENT:
373  sigterm_handler(SIGINT);
374  return TRUE;
375 
376  case CTRL_CLOSE_EVENT:
377  case CTRL_LOGOFF_EVENT:
378  case CTRL_SHUTDOWN_EVENT:
379  sigterm_handler(SIGTERM);
380  /* Basically, with these 3 events, when we return from this method the
381  process is hard terminated, so stall as long as we need to
382  to try and let the main thread(s) clean up and gracefully terminate
383  (we have at most 5 seconds, but should be done far before that). */
384  while (!ffmpeg_exited) {
385  Sleep(0);
386  }
387  return TRUE;
388 
389  default:
390  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
391  return FALSE;
392  }
393 }
394 #endif
395 
/* Put the controlling terminal into raw-ish mode so single keypresses
 * can be read, and install signal/console handlers for clean shutdown.
 * NOTE(review): original line 399 is elided in this extraction — it
 * presumably opens a conditional block (e.g. skipping tty setup when
 * running as a daemon) that the brace on line 417 closes; as shown the
 * braces look unbalanced. Confirm against the upstream source. */
396 void term_init(void)
397 {
398 #if HAVE_TERMIOS_H
400  struct termios tty;
401  if (tcgetattr (0, &tty) == 0) {
     /* remember the original settings so term_exit_sigsafe() can restore */
402  oldtty = tty;
403  restore_tty = 1;
404 
     /* disable input translation, echo and canonical (line) mode */
405  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
406  |INLCR|IGNCR|ICRNL|IXON);
407  tty.c_oflag |= OPOST;
408  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
409  tty.c_cflag &= ~(CSIZE|PARENB);
410  tty.c_cflag |= CS8;
411  tty.c_cc[VMIN] = 1;
412  tty.c_cc[VTIME] = 0;
413 
414  tcsetattr (0, TCSANOW, &tty);
415  }
416  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
417  }
418 #endif
419 
420  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
421  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
422 #ifdef SIGXCPU
423  signal(SIGXCPU, sigterm_handler);
424 #endif
425 #ifdef SIGPIPE
426  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
427 #endif
428 #if HAVE_SETCONSOLECTRLHANDLER
429  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
430 #endif
431 }
432 
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, -1 when nothing is available (or on a
 * closed input pipe), or the non-1 result of read() on short read/error. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout = { 0, 0 };
    fd_set readable;
    int ret;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    ret = select(1, &readable, NULL, NULL, &timeout);
    if (ret > 0) {
        ret = read(0, &ch, 1);
        return ret == 1 ? ch : ret;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }
        return -1;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
484 
/* AVFormatContext->interrupt_callback hook used to abort blocking I/O.
 * NOTE(review): the return statement (original line 487) is elided in
 * this extraction; upstream it presumably reports whether termination
 * was requested — confirm before relying on this listing. */
485 static int decode_interrupt_cb(void *ctx)
486 {
488 }
489 
491 
/* Global teardown, registered to run at exit: free filtergraphs, close
 * output files, free per-stream state, close inputs, and report status.
 * `ret` is the process exit code being propagated.
 * NOTE(review): this extraction elides a number of original lines (the
 * gaps in the embedded numbering, e.g. 503, 518, 522, 551, 557, 570,
 * 575, 579, 585-586, 594, 609-618, 625, 629, 638); in particular the
 * declaration of `s` used at line 554 (presumably
 * `AVFormatContext *s;` at line 551) is missing here. Confirm any
 * elided statement against the upstream source. */
492 static void ffmpeg_cleanup(int ret)
493 {
494  int i, j;
495 
496  if (do_benchmark) {
497  int maxrss = getmaxrss() / 1024;
498  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
499  }
500 
     /* free all filtergraphs, draining queued frames/subtitles first */
501  for (i = 0; i < nb_filtergraphs; i++) {
502  FilterGraph *fg = filtergraphs[i];
504  for (j = 0; j < fg->nb_inputs; j++) {
505  InputFilter *ifilter = fg->inputs[j];
506  struct InputStream *ist = ifilter->ist;
507 
508  while (av_fifo_size(ifilter->frame_queue)) {
509  AVFrame *frame;
510  av_fifo_generic_read(ifilter->frame_queue, &frame,
511  sizeof(frame), NULL);
512  av_frame_free(&frame);
513  }
514  av_fifo_freep(&ifilter->frame_queue);
515  if (ist->sub2video.sub_queue) {
516  while (av_fifo_size(ist->sub2video.sub_queue)) {
517  AVSubtitle sub;
519  &sub, sizeof(sub), NULL);
520  avsubtitle_free(&sub);
521  }
523  }
524  av_buffer_unref(&ifilter->hw_frames_ctx);
525  av_freep(&ifilter->name);
526  av_freep(&fg->inputs[j]);
527  }
528  av_freep(&fg->inputs);
529  for (j = 0; j < fg->nb_outputs; j++) {
530  OutputFilter *ofilter = fg->outputs[j];
531 
532  avfilter_inout_free(&ofilter->out_tmp);
533  av_freep(&ofilter->name);
534  av_freep(&ofilter->formats);
535  av_freep(&ofilter->channel_layouts);
536  av_freep(&ofilter->sample_rates);
537  av_freep(&fg->outputs[j]);
538  }
539  av_freep(&fg->outputs);
540  av_freep(&fg->graph_desc);
541 
542  av_freep(&filtergraphs[i]);
543  }
544  av_freep(&filtergraphs);
545 
547 
548  /* close files */
549  for (i = 0; i < nb_output_files; i++) {
550  OutputFile *of = output_files[i];
552  if (!of)
553  continue;
554  s = of->ctx;
555  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
556  avio_closep(&s->pb);
558  av_dict_free(&of->opts);
559 
560  av_freep(&output_files[i]);
561  }
     /* free per-output-stream state */
562  for (i = 0; i < nb_output_streams; i++) {
563  OutputStream *ost = output_streams[i];
564 
565  if (!ost)
566  continue;
567 
568  av_bsf_free(&ost->bsf_ctx);
569 
571  av_frame_free(&ost->last_frame);
572  av_dict_free(&ost->encoder_opts);
573 
574  av_freep(&ost->forced_keyframes);
576  av_freep(&ost->avfilter);
577  av_freep(&ost->logfile_prefix);
578 
580  ost->audio_channels_mapped = 0;
581 
582  av_dict_free(&ost->sws_dict);
583  av_dict_free(&ost->swr_opts);
584 
587 
588  if (ost->muxing_queue) {
589  while (av_fifo_size(ost->muxing_queue)) {
590  AVPacket pkt;
591  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
592  av_packet_unref(&pkt);
593  }
595  }
596 
597  av_freep(&output_streams[i]);
598  }
599 #if HAVE_THREADS
600  free_input_threads();
601 #endif
     /* close input files and free per-input-stream state */
602  for (i = 0; i < nb_input_files; i++) {
603  avformat_close_input(&input_files[i]->ctx);
604  av_freep(&input_files[i]);
605  }
606  for (i = 0; i < nb_input_streams; i++) {
607  InputStream *ist = input_streams[i];
608 
611  av_dict_free(&ist->decoder_opts);
614  av_freep(&ist->filters);
615  av_freep(&ist->hwaccel_device);
616  av_freep(&ist->dts_buffer);
617 
619 
620  av_freep(&input_streams[i]);
621  }
622 
623  if (vstats_file) {
624  if (fclose(vstats_file))
626  "Error closing vstats file, loss of information possible: %s\n",
627  av_err2str(AVERROR(errno)));
628  }
630 
631  av_freep(&input_streams);
632  av_freep(&input_files);
633  av_freep(&output_streams);
634  av_freep(&output_files);
635 
636  uninit_opts();
637 
639 
640  if (received_sigterm) {
641  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
642  (int) received_sigterm);
643  } else if (ret && atomic_load(&transcode_init_done)) {
644  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
645  }
646  term_exit();
     /* let the Windows CtrlHandler (if spinning) know we are done */
647  ffmpeg_exited = 1;
648 }
649 
/* Remove from one dictionary every key present in another.
 * NOTE(review): the function head (original line 650, presumably
 * `static void remove_avoptions(AVDictionary **a, AVDictionary *b)`) and
 * the loop body (line 655, presumably the av_dict_set() call deleting
 * t->key from *a) are elided in this extraction — confirm upstream. */
651 {
652  AVDictionaryEntry *t = NULL;
653 
654  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
656  }
657 }
658 
/* Abort if any option in the dictionary was not consumed.
 * NOTE(review): the function head (original line 659, presumably
 * `static void assert_avoptions(AVDictionary *m)`) and the declaration
 * of `t` (line 661) are elided in this extraction — confirm upstream. */
660 {
662  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
663  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
664  exit_program(1);
665  }
666 }
667 
/* Terminate the program when an experimental codec is requested without
 * the required opt-in. The codec and encoder/decoder flag parameters are
 * unused in the visible body — presumably consumed by an error message
 * elsewhere or in a fuller version; confirm upstream. */
668 static void abort_codec_experimental(AVCodec *c, int encoder)
669 {
670  exit_program(1);
671 }
672 
/* With -benchmark_all, log the user/system/real time consumed since the
 * previous call, labelled by the printf-style fmt arguments, and refresh
 * the `current_time` reference point. A NULL fmt only resets the
 * reference. NOTE(review): original line 676 (presumably
 * `BenchmarkTimeStamps t = get_benchmark_time_stamps();`) and line 684
 * (the av_log() call head) are elided in this extraction — confirm
 * upstream. */
673 static void update_benchmark(const char *fmt, ...)
674 {
675  if (do_benchmark_all) {
677  va_list va;
678  char buf[1024];
679 
680  if (fmt) {
681  va_start(va, fmt);
682  vsnprintf(buf, sizeof(buf), fmt, va);
683  va_end(va);
685  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
686  t.user_usec - current_time.user_usec,
687  t.sys_usec - current_time.sys_usec,
688  t.real_usec - current_time.real_usec, buf);
689  }
690  current_time = t;
691  }
692 }
693 
/* Mark every output stream finished: `this_stream` flags are OR-ed into
 * the given stream, `others` into all remaining streams.
 * NOTE(review): the function head (original line 694, presumably
 * `static void close_all_output_streams(OutputStream *ost,
 * OSTFinished this_stream, OSTFinished others)`) is elided in this
 * extraction — confirm upstream. */
695 {
696  int i;
697  for (i = 0; i < nb_output_streams; i++) {
698  OutputStream *ost2 = output_streams[i];
699  ost2->finished |= ost == ost2 ? this_stream : others;
700  }
701 }
702 
/* Hand one packet to the muxer for `ost`, enforcing -frames limits,
 * buffering packets while the muxer header is not yet written, fixing up
 * timestamps (DTS>PTS, non-monotonic DTS) and finally interleaved-writing.
 * `unqueue` is non-zero when the packet is being replayed from the
 * muxing queue (so it must not be counted again).
 * NOTE(review): this extraction elides several original lines (730, 737,
 * 755-756, 761, 795, 830, 841) — among them the right-hand side of the
 * `are_we_over_size` computation, the av_log() head before the
 * "Too many packets buffered" string, the side-data fetch feeding `sd`,
 * and a condition head before line 796. Confirm all elided code against
 * the upstream source before relying on this listing. */
703 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
704 {
705  AVFormatContext *s = of->ctx;
706  AVStream *st = ost->st;
707  int ret;
708 
709  /*
710  * Audio encoders may split the packets -- #frames in != #packets out.
711  * But there is no reordering, so we can limit the number of output packets
712  * by simply dropping them here.
713  * Counting encoded video frames needs to be done separately because of
714  * reordering, see do_video_out().
715  * Do not count the packet when unqueued because it has been counted when queued.
716  */
717  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
718  if (ost->frame_number >= ost->max_frames) {
719  av_packet_unref(pkt);
720  return;
721  }
722  ost->frame_number++;
723  }
724 
725  if (!of->header_written) {
726  AVPacket tmp_pkt = {0};
727  /* the muxer is not initialized yet, buffer the packet */
728  if (!av_fifo_space(ost->muxing_queue)) {
729  unsigned int are_we_over_size =
731  int new_size = are_we_over_size ?
732  FFMIN(2 * av_fifo_size(ost->muxing_queue),
733  ost->max_muxing_queue_size) :
734  2 * av_fifo_size(ost->muxing_queue);
735 
736  if (new_size <= av_fifo_size(ost->muxing_queue)) {
738  "Too many packets buffered for output stream %d:%d.\n",
739  ost->file_index, ost->st->index);
740  exit_program(1);
741  }
742  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
743  if (ret < 0)
744  exit_program(1);
745  }
746  ret = av_packet_make_refcounted(pkt);
747  if (ret < 0)
748  exit_program(1);
749  av_packet_move_ref(&tmp_pkt, pkt);
750  ost->muxing_queue_data_size += tmp_pkt.size;
751  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
752  return;
753  }
754 
757  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
758 
     /* extract encoder quality / picture type / error stats from
        packet side data for -vstats and progress reporting */
759  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
760  int i;
762  NULL);
763  ost->quality = sd ? AV_RL32(sd) : -1;
764  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
765 
766  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
767  if (sd && i < sd[5])
768  ost->error[i] = AV_RL64(sd + 8 + 8*i);
769  else
770  ost->error[i] = -1;
771  }
772 
773  if (ost->frame_rate.num && ost->is_cfr) {
774  if (pkt->duration > 0)
775  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
776  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
777  ost->mux_timebase);
778  }
779  }
780 
781  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
782 
783  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
     /* DTS must never exceed PTS; replace both by a guess keeping order */
784  if (pkt->dts != AV_NOPTS_VALUE &&
785  pkt->pts != AV_NOPTS_VALUE &&
786  pkt->dts > pkt->pts) {
787  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
788  pkt->dts, pkt->pts,
789  ost->file_index, ost->st->index);
     /* pick the median of {pts, dts, last_mux_dts + 1} */
790  pkt->pts =
791  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
792  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
793  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
794  }
796  pkt->dts != AV_NOPTS_VALUE &&
797  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
798  ost->last_mux_dts != AV_NOPTS_VALUE) {
799  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
800  if (pkt->dts < max) {
801  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
802  if (exit_on_error)
803  loglevel = AV_LOG_ERROR;
804  av_log(s, loglevel, "Non-monotonous DTS in output stream "
805  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
806  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
807  if (exit_on_error) {
808  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
809  exit_program(1);
810  }
811  av_log(s, loglevel, "changing to %"PRId64". This may result "
812  "in incorrect timestamps in the output file.\n",
813  max);
814  if (pkt->pts >= pkt->dts)
815  pkt->pts = FFMAX(pkt->pts, max);
816  pkt->dts = max;
817  }
818  }
819  }
820  ost->last_mux_dts = pkt->dts;
821 
822  ost->data_size += pkt->size;
823  ost->packets_written++;
824 
825  pkt->stream_index = ost->index;
826 
827  if (debug_ts) {
828  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
829  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
831  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
832  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
833  pkt->size
834  );
835  }
836 
837  ret = av_interleaved_write_frame(s, pkt);
838  if (ret < 0) {
839  print_error("av_interleaved_write_frame()", ret);
840  main_return_code = 1;
842  }
843  av_packet_unref(pkt);
844 }
845 
/* Mark an output stream's encoder finished; with -shortest, also clamp
 * the output file's recording time to this stream's current position.
 * NOTE(review): the function head (original line 846, presumably
 * `static void close_output_stream(OutputStream *ost)`) is elided in
 * this extraction — confirm upstream. */
847 {
848  OutputFile *of = output_files[ost->file_index];
849 
850  ost->finished |= ENCODER_FINISHED;
851  if (of->shortest) {
852  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
853  of->recording_time = FFMIN(of->recording_time, end);
854  }
855 }
856 
857 /*
858  * Send a single packet to the output, applying any bitstream filters
859  * associated with the output stream. This may result in any number
860  * of packets actually being written, depending on what bitstream
861  * filters are applied. The supplied packet is consumed and will be
862  * blank (as if newly-allocated) when this function returns.
863  *
864  * If eof is set, instead indicate EOF to all bitstream filters and
865  * therefore flush any delayed packets to the output. A blank packet
866  * must be supplied in this case.
867  */
/* NOTE(review): the signature head (original line 868, presumably
 * `static void output_packet(OutputFile *of, AVPacket *pkt,`) is elided
 * in this extraction — confirm upstream. */
869  OutputStream *ost, int eof)
870 {
871  int ret = 0;
872 
873  /* apply the output bitstream filters */
874  if (ost->bsf_ctx) {
875  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
876  if (ret < 0)
877  goto finish;
     /* drain everything the bsf produces for this input */
878  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
879  write_packet(of, pkt, ost, 0);
880  if (ret == AVERROR(EAGAIN))
881  ret = 0;
882  } else if (!eof)
883  write_packet(of, pkt, ost, 0);
884 
885 finish:
886  if (ret < 0 && ret != AVERROR_EOF) {
887  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
888  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
889  if(exit_on_error)
890  exit_program(1);
891  }
892 }
893 
/* Return 1 while the stream is within the output file's -t recording
 * window; once past it, close the stream and return 0.
 * NOTE(review): the function head (original line 894, presumably
 * `static int check_recording_time(OutputStream *ost)`) and the
 * comparison call (line 899, presumably an av_compare_ts() of
 * ost->sync_opts against of->recording_time) are elided in this
 * extraction — confirm upstream. */
895 {
896  OutputFile *of = output_files[ost->file_index];
897 
898  if (of->recording_time != INT64_MAX &&
900  AV_TIME_BASE_Q) >= 0) {
901  close_output_stream(ost);
902  return 0;
903  }
904  return 1;
905 }
906 
/* Rebase a filtered frame's pts from the buffersink time base to the
 * encoder time base, subtracting the output file's start time, and
 * return the same value as a higher-precision double for the fps code.
 * NOTE(review): the function head (original line 907, presumably
 * `static double adjust_frame_pts_to_encoder_tb(OutputFile *of,
 * OutputStream *ost,`) and the declaration feeding `filter` at line 920
 * (original line 917) are elided in this extraction — confirm upstream. */
908  AVFrame *frame)
909 {
910  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
911  AVCodecContext *enc = ost->enc_ctx;
912  if (!frame || frame->pts == AV_NOPTS_VALUE ||
913  !enc || !ost->filter || !ost->filter->graph->graph)
914  goto early_exit;
915 
916  {
918 
919  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
920  AVRational filter_tb = av_buffersink_get_time_base(filter);
921  AVRational tb = enc->time_base;
     /* widen the denominator to keep extra fractional precision while
        rescaling, then divide it back out below */
922  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
923 
924  tb.den <<= extra_bits;
925  float_pts =
926  av_rescale_q(frame->pts, filter_tb, tb) -
927  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
928  float_pts /= 1 << extra_bits;
929  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
930  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
931 
932  frame->pts =
933  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
934  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
935  }
936 
937 early_exit:
938 
939  if (debug_ts) {
940  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
941  frame ? av_ts2str(frame->pts) : "NULL",
942  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
943  float_pts,
944  enc ? enc->time_base.num : -1,
945  enc ? enc->time_base.den : -1);
946  }
947 
948  return float_pts;
949 }
950 
/* Tail of the forward declaration of init_output_stream() (its head,
 * original line 951, is elided), followed by init_output_stream_wrapper.
 * The wrapper initializes an output stream once, logging the error and
 * optionally aborting the program when `fatal` is non-zero.
 * NOTE(review): the wrapper's signature head (original line 954,
 * presumably `static int init_output_stream_wrapper(OutputStream *ost,
 * AVFrame *frame,`) is elided in this extraction — confirm upstream. */
952  char *error, int error_len);
953 
955  unsigned int fatal)
956 {
957  int ret = AVERROR_BUG;
958  char error[1024] = {0};
959 
     /* already initialized: nothing to do */
960  if (ost->initialized)
961  return 0;
962 
963  ret = init_output_stream(ost, frame, error, sizeof(error));
964  if (ret < 0) {
965  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
966  ost->file_index, ost->index, error);
967 
968  if (fatal)
969  exit_program(1);
970  }
971 
972  return ret;
973 }
974 
/* Encode one filtered audio frame and mux every packet the encoder
 * produces. Advances ost->sync_opts by the frame's sample count and
 * aborts the program on any encoder error.
 * NOTE(review): original line 998 is elided in this extraction
 * (presumably blank); confirm upstream. */
975 static void do_audio_out(OutputFile *of, OutputStream *ost,
976  AVFrame *frame)
977 {
978  AVCodecContext *enc = ost->enc_ctx;
979  AVPacket pkt;
980  int ret;
981 
982  av_init_packet(&pkt);
983  pkt.data = NULL;
984  pkt.size = 0;
985 
     /* rebase frame->pts from the filter to the encoder time base */
986  adjust_frame_pts_to_encoder_tb(of, ost, frame);
987 
988  if (!check_recording_time(ost))
989  return;
990 
991  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
992  frame->pts = ost->sync_opts;
993  ost->sync_opts = frame->pts + frame->nb_samples;
994  ost->samples_encoded += frame->nb_samples;
995  ost->frames_encoded++;
996 
997  av_assert0(pkt.size || !pkt.data);
999  if (debug_ts) {
1000  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1001  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1002  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1003  enc->time_base.num, enc->time_base.den);
1004  }
1005 
1006  ret = avcodec_send_frame(enc, frame);
1007  if (ret < 0)
1008  goto error;
1009 
     /* drain all packets available for this input frame */
1010  while (1) {
1011  ret = avcodec_receive_packet(enc, &pkt);
1012  if (ret == AVERROR(EAGAIN))
1013  break;
1014  if (ret < 0)
1015  goto error;
1016 
1017  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1018 
1019  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1020 
1021  if (debug_ts) {
1022  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1023  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1024  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1025  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1026  }
1027 
1028  output_packet(of, &pkt, ost, 0);
1029  }
1030 
1031  return;
1032 error:
1033  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1034  exit_program(1);
1035 }
1036 
/* Encode one subtitle and mux the result. DVB subtitles are emitted as
 * two packets (one that draws the subpicture and one that clears it);
 * everything else as a single packet. Uses the file-scope `subtitle_out`
 * buffer, lazily allocated on first use. Aborts the program on encoder
 * or allocation failure. */
1037 static void do_subtitle_out(OutputFile *of,
1038  OutputStream *ost,
1039  AVSubtitle *sub)
1040 {
1041  int subtitle_out_max_size = 1024 * 1024;
1042  int subtitle_out_size, nb, i;
1043  AVCodecContext *enc;
1044  AVPacket pkt;
1045  int64_t pts;
1046 
1047  if (sub->pts == AV_NOPTS_VALUE) {
1048  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1049  if (exit_on_error)
1050  exit_program(1);
1051  return;
1052  }
1053 
1054  enc = ost->enc_ctx;
1055 
1056  if (!subtitle_out) {
1057  subtitle_out = av_malloc(subtitle_out_max_size);
1058  if (!subtitle_out) {
1059  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1060  exit_program(1);
1061  }
1062  }
1063 
1064  /* Note: DVB subtitle need one packet to draw them and one other
1065  packet to clear them */
1066  /* XXX: signal it in the codec context ? */
1067  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1068  nb = 2;
1069  else
1070  nb = 1;
1071 
1072  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1073  pts = sub->pts;
1074  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1075  pts -= output_files[ost->file_index]->start_time;
1076  for (i = 0; i < nb; i++) {
1077  unsigned save_num_rects = sub->num_rects;
1078 
1079  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1080  if (!check_recording_time(ost))
1081  return;
1082 
1083  sub->pts = pts;
1084  // start_display_time is required to be 0
1085  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1086  sub->end_display_time -= sub->start_display_time;
1087  sub->start_display_time = 0;
     /* the second (clearing) DVB packet carries no rectangles */
1088  if (i == 1)
1089  sub->num_rects = 0;
1090 
1091  ost->frames_encoded++;
1092 
1093  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1094  subtitle_out_max_size, sub);
     /* restore the caller's rect count after the clearing pass */
1095  if (i == 1)
1096  sub->num_rects = save_num_rects;
1097  if (subtitle_out_size < 0) {
1098  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1099  exit_program(1);
1100  }
1101 
1102  av_init_packet(&pkt);
1103  pkt.data = subtitle_out;
1104  pkt.size = subtitle_out_size;
1105  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1106  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1107  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1108  /* XXX: the pts correction is handled here. Maybe handling
1109  it in the codec would be better */
1110  if (i == 0)
1111  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1112  else
1113  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1114  }
1115  pkt.dts = pkt.pts;
1116  output_packet(of, &pkt, ost, 0);
1117  }
1118 }
1119 
/*
 * do_video_out(): encode one filtered video frame (next_picture == NULL
 * means end-of-stream flush) for output stream `ost`, applying the active
 * video sync method.  From the drift (delta0/delta) between the frame's
 * rescaled pts (sync_ipts) and the next expected output pts
 * (ost->sync_opts), the frame may be dropped (nb_frames == 0), emitted
 * once, or duplicated (nb_frames > 1); nb0_frames counts how many of the
 * emissions should repeat the PREVIOUS frame instead (VFR bookkeeping).
 * NOTE(review): this extract omits several original interior lines (e.g.
 * the declaration of `filter`, the forced-keyframe expression evaluation,
 * and a couple of av_log calls); the code below is kept byte-identical.
 */
1120 static void do_video_out(OutputFile *of,
 1121  OutputStream *ost,
 1122  AVFrame *next_picture)
 1123 {
 1124  int ret, format_video_sync;
 1125  AVPacket pkt;
 1126  AVCodecContext *enc = ost->enc_ctx;
 1127  AVRational frame_rate;
 1128  int nb_frames, nb0_frames, i;
 1129  double delta, delta0;
 1130  double duration = 0;
 1131  double sync_ipts = AV_NOPTS_VALUE;
 1132  int frame_size = 0;
 1133  InputStream *ist = NULL;
 1135 
 1136  init_output_stream_wrapper(ost, next_picture, 1);
 1137  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
 1138 
 1139  if (ost->source_index >= 0)
 1140  ist = input_streams[ost->source_index];
 1141 
 /* NOTE(review): `filter` is declared on an original line missing from this
  * extract — presumably ost->filter->filter; confirm against upstream. */
 1142  frame_rate = av_buffersink_get_frame_rate(filter);
 1143  if (frame_rate.num > 0 && frame_rate.den > 0)
 1144  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
 1145 
 1146  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
 1147  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
 1148 
 /* with no filtering configured, trust the input packet duration if usable */
 1149  if (!ost->filters_script &&
 1150  !ost->filters &&
 1151  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
 1152  next_picture &&
 1153  ist &&
 1154  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
 1155  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
 1156  }
 1157 
 1158  if (!next_picture) {
 1159  //end, flushing
 1160  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
 1161  ost->last_nb0_frames[1],
 1162  ost->last_nb0_frames[2]);
 1163  } else {
 1164  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
 1165  delta = delta0 + duration;
 1166 
 1167  /* by default, we output a single frame */
 1168  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
 1169  nb_frames = 1;
 1170 
 /* resolve VSYNC_AUTO to a concrete sync method based on the muxer */
 1171  format_video_sync = video_sync_method;
 1172  if (format_video_sync == VSYNC_AUTO) {
 1173  if(!strcmp(of->ctx->oformat->name, "avi")) {
 1174  format_video_sync = VSYNC_VFR;
 1175  } else
 1176  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
 1177  if ( ist
 1178  && format_video_sync == VSYNC_CFR
 1179  && input_files[ist->file_index]->ctx->nb_streams == 1
 1180  && input_files[ist->file_index]->input_ts_offset == 0) {
 1181  format_video_sync = VSYNC_VSCFR;
 1182  }
 1183  if (format_video_sync == VSYNC_CFR && copy_ts) {
 1184  format_video_sync = VSYNC_VSCFR;
 1185  }
 1186  }
 1187  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
 1188 
 /* the frame starts in the past but still overlaps the current output slot:
  * clip it to start at ost->sync_opts instead of dropping it outright */
 1189  if (delta0 < 0 &&
 1190  delta > 0 &&
 1191  format_video_sync != VSYNC_PASSTHROUGH &&
 1192  format_video_sync != VSYNC_DROP) {
 1193  if (delta0 < -0.6) {
 1194  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
 1195  } else
 1196  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
 1197  sync_ipts = ost->sync_opts;
 1198  duration += delta0;
 1199  delta0 = 0;
 1200  }
 1201 
 1202  switch (format_video_sync) {
 1203  case VSYNC_VSCFR:
 1204  if (ost->frame_number == 0 && delta0 >= 0.5) {
 1205  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
 1206  delta = duration;
 1207  delta0 = 0;
 1208  ost->sync_opts = llrint(sync_ipts);
 1209  }
 /* fallthrough: VSCFR shares the CFR drop/duplicate decision below */
 1210  case VSYNC_CFR:
 1211  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
 1212  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
 1213  nb_frames = 0;
 1214  } else if (delta < -1.1)
 1215  nb_frames = 0;
 1216  else if (delta > 1.1) {
 1217  nb_frames = lrintf(delta);
 1218  if (delta0 > 1.1)
 1219  nb0_frames = llrintf(delta0 - 0.6);
 1220  }
 1221  break;
 1222  case VSYNC_VFR:
 1223  if (delta <= -0.6)
 1224  nb_frames = 0;
 1225  else if (delta > 0.6)
 1226  ost->sync_opts = llrint(sync_ipts);
 1227  break;
 1228  case VSYNC_DROP:
 1229  case VSYNC_PASSTHROUGH:
 1230  ost->sync_opts = llrint(sync_ipts);
 1231  break;
 1232  default:
 1233  av_assert0(0);
 1234  }
 1235  }
 1236 
 /* never emit more frames than -frames:v (max_frames) allows */
 1237  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
 1238  nb0_frames = FFMIN(nb0_frames, nb_frames);
 1239 
 /* push nb0_frames into a 3-entry history; mid_pred() of that history seeds
  * the flush path above when next_picture == NULL */
 1240  memmove(ost->last_nb0_frames + 1,
 1241  ost->last_nb0_frames,
 1242  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
 1243  ost->last_nb0_frames[0] = nb0_frames;
 1244 
 /* NOTE(review): the av_log() opening line for this drop message is missing
  * from this extract; arguments on the following lines belong to it. */
 1245  if (nb0_frames == 0 && ost->last_dropped) {
 1246  nb_frames_drop++;
 1248  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
 1249  ost->frame_number, ost->st->index, ost->last_frame->pts);
 1250  }
 1251  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
 1252  if (nb_frames > dts_error_threshold * 30) {
 1253  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
 1254  nb_frames_drop++;
 1255  return;
 1256  }
 1257  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
 1258  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
 1259  if (nb_frames_dup > dup_warning) {
 1260  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
 1261  dup_warning *= 10;
 1262  }
 1263  }
 1264  ost->last_dropped = nb_frames == nb0_frames && next_picture;
 1265 
 1266  /* duplicates frame if needed */
 1267  for (i = 0; i < nb_frames; i++) {
 1268  AVFrame *in_picture;
 1269  int forced_keyframe = 0;
 1270  double pts_time;
 1271  av_init_packet(&pkt);
 1272  pkt.data = NULL;
 1273  pkt.size = 0;
 1274 
 /* the first nb0_frames iterations re-encode the previous frame */
 1275  if (i < nb0_frames && ost->last_frame) {
 1276  in_picture = ost->last_frame;
 1277  } else
 1278  in_picture = next_picture;
 1279 
 1280  if (!in_picture)
 1281  return;
 1282 
 1283  in_picture->pts = ost->sync_opts;
 1284 
 1285  if (!check_recording_time(ost))
 1286  return;
 1287 
 1288  in_picture->quality = enc->global_quality;
 1289  in_picture->pict_type = 0;
 1290 
 1291  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
 1292  in_picture->pts != AV_NOPTS_VALUE)
 1293  ost->forced_kf_ref_pts = in_picture->pts;
 1294 
 /* decide whether this frame must become a keyframe: explicit -force_key_frames
  * timestamps, a force_key_frames expression, or "source" keyframe copying */
 1295  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
 1296  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
 1297  if (ost->forced_kf_index < ost->forced_kf_count &&
 1298  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
 1299  ost->forced_kf_index++;
 1300  forced_keyframe = 1;
 1301  } else if (ost->forced_keyframes_pexpr) {
 1302  double res;
 1303  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
 /* NOTE(review): the av_expr_eval() call and several ff_dlog argument lines
  * are missing from this extract; `res` is set there. */
 1306  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
 1312  res);
 1313  if (res) {
 1314  forced_keyframe = 1;
 1320  }
 1321 
 1323  } else if ( ost->forced_keyframes
 1324  && !strncmp(ost->forced_keyframes, "source", 6)
 1325  && in_picture->key_frame==1
 1326  && !i) {
 1327  forced_keyframe = 1;
 1328  }
 1329 
 1330  if (forced_keyframe) {
 1331  in_picture->pict_type = AV_PICTURE_TYPE_I;
 1332  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
 1333  }
 1334 
 1336  if (debug_ts) {
 1337  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
 1338  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
 1339  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
 1340  enc->time_base.num, enc->time_base.den);
 1341  }
 1342 
 1343  ost->frames_encoded++;
 1344 
 1345  ret = avcodec_send_frame(enc, in_picture);
 1346  if (ret < 0)
 1347  goto error;
 1348  // Make sure Closed Captions will not be duplicated
 1350 
 /* drain every packet the encoder has ready; EAGAIN = wants more input */
 1351  while (1) {
 1352  ret = avcodec_receive_packet(enc, &pkt);
 1353  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
 1354  if (ret == AVERROR(EAGAIN))
 1355  break;
 1356  if (ret < 0)
 1357  goto error;
 1358 
 1359  if (debug_ts) {
 1360  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
 1361  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
 1362  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
 1363  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
 1364  }
 1365 
 1366  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
 1367  pkt.pts = ost->sync_opts;
 1368 
 1369  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
 1370 
 1371  if (debug_ts) {
 1372  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
 1373  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
 1374  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
 1375  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
 1376  }
 1377 
 1378  frame_size = pkt.size;
 1379  output_packet(of, &pkt, ost, 0);
 1380 
 1381  /* if two pass, output log */
 1382  if (ost->logfile && enc->stats_out) {
 1383  fprintf(ost->logfile, "%s", enc->stats_out);
 1384  }
 1385  }
 1386  ost->sync_opts++;
 1387  /*
 1388  * For video, number of frames in == number of packets out.
 1389  * But there may be reordering, so we can't throw away frames on encoder
 1390  * flush, we need to limit them here, before they go into encoder.
 1391  */
 1392  ost->frame_number++;
 1393 
 1394  if (vstats_filename && frame_size)
 1395  do_video_stats(ost, frame_size);
 1396  }
 1397 
 /* remember this frame so nb0_frames iterations can re-encode it next call */
 1398  if (!ost->last_frame)
 1399  ost->last_frame = av_frame_alloc();
 1400  av_frame_unref(ost->last_frame);
 1401  if (next_picture && ost->last_frame)
 1402  av_frame_ref(ost->last_frame, next_picture);
 1403  else
 1404  av_frame_free(&ost->last_frame);
 1405 
 1406  return;
 1407 error:
 1408  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
 1409  exit_program(1);
 1410 }
1411 
1412 static double psnr(double d)
1413 {
1414  return -10.0 * log10(d);
1415 }
1416 
/*
 * Body of do_video_stats(): append one record per encoded video frame to
 * the -vstats file (frame number, quantizer, optional PSNR, packet size,
 * instantaneous and average bitrate, picture type).
 * NOTE(review): the function signature line is missing from this extract;
 * the body references `ost` (the OutputStream) and `frame_size` (the
 * encoded packet size in bytes) — confirm against upstream.
 */
 1418 {
 1419  AVCodecContext *enc;
 1420  int frame_number;
 1421  double ti1, bitrate, avg_bitrate;
 1422 
 1423  /* this is executed just the first time do_video_stats is called */
 1424  if (!vstats_file) {
 1425  vstats_file = fopen(vstats_filename, "w");
 1426  if (!vstats_file) {
 1427  perror("fopen");
 1428  exit_program(1);
 1429  }
 1430  }
 1431 
 1432  enc = ost->enc_ctx;
 1433  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1434  frame_number = ost->st->nb_frames;
 /* vstats_version 2 adds output-file and stream indices to each record */
 1435  if (vstats_version <= 1) {
 1436  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
 1437  ost->quality / (float)FF_QP2LAMBDA);
 1438  } else {
 1439  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
 1440  ost->quality / (float)FF_QP2LAMBDA);
 1441  }
 1442 
 /* error[0] holds the luma SSE; normalize per pixel before converting to dB */
 1443  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
 1444  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 1445 
 1446  fprintf(vstats_file,"f_size= %6d ", frame_size);
 1447  /* compute pts value */
 1448  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
 /* clamp to avoid division by ~0 in the average-bitrate computation */
 1449  if (ti1 < 0.01)
 1450  ti1 = 0.01;
 1451 
 1452  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
 1453  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
 1454  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 1455  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
 1456  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
 1457  }
 1458 }
1459 
/*
 * Body of finish_output_stream(): mark an output stream as done and, when
 * -shortest was given for its output file, mark every stream of that file
 * finished for both the encoder and the muxer.
 * NOTE(review): the signature line and the statement that flags `ost`
 * itself as finished are missing from this extract — confirm upstream.
 */
 1461 {
 1462  OutputFile *of = output_files[ost->file_index];
 1463  int i;
 1464 
 1466 
 1467  if (of->shortest) {
 1468  for (i = 0; i < of->ctx->nb_streams; i++)
 1469  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
 1470  }
 1471 }
1472 
1473 /**
1474  * Get and encode new output from any of the filtergraphs, without causing
1475  * activity.
1476  *
1477  * @return 0 for success, <0 for severe errors
1478  */
/*
 * Pull every frame currently available from each output stream's buffer
 * sink and hand it to do_video_out()/do_audio_out(); with `flush` set,
 * EOF from a video sink triggers a final flushing do_video_out(NULL).
 * Returns 0, or AVERROR(ENOMEM) if the reusable filtered_frame cannot be
 * allocated.
 * NOTE(review): a few original lines are missing from this extract (the
 * `filter` variable declaration, the buffersink flags argument, the
 * encoder-init call mentioned in the comment below, and two av_log
 * openings); code kept byte-identical.
 */
 1479 static int reap_filters(int flush)
 1480 {
 1481  AVFrame *filtered_frame = NULL;
 1482  int i;
 1483 
 1484  /* Reap all buffers present in the buffer sinks */
 1485  for (i = 0; i < nb_output_streams; i++) {
 1486  OutputStream *ost = output_streams[i];
 1487  OutputFile *of = output_files[ost->file_index];
 1489  AVCodecContext *enc = ost->enc_ctx;
 1490  int ret = 0;
 1491 
 /* skip streams whose filtergraph has not been configured yet */
 1492  if (!ost->filter || !ost->filter->graph->graph)
 1493  continue;
 1494  filter = ost->filter->filter;
 1495 
 1496  /*
 1497  * Unlike video, with audio the audio frame size matters.
 1498  * Currently we are fully reliant on the lavfi filter chain to
 1499  * do the buffering deed for us, and thus the frame size parameter
 1500  * needs to be set accordingly. Where does one get the required
 1501  * frame size? From the initialized AVCodecContext of an audio
 1502  * encoder. Thus, if we have gotten to an audio stream, initialize
 1503  * the encoder earlier than receiving the first AVFrame.
 1504  */
 1507 
 /* filtered_frame is allocated once and reused across calls */
 1508  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
 1509  return AVERROR(ENOMEM);
 1510  }
 1511  filtered_frame = ost->filtered_frame;
 1512 
 1513  while (1) {
 1514  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
 1516  if (ret < 0) {
 1517  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
 1519  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
 1521  } else if (flush && ret == AVERROR_EOF) {
 /* on flush, give the video path a chance to emit trailing frames */
 1522  do_video_out(of, ost, NULL);
 1523  }
 1524  break;
 1525  }
 /* stream already finished: discard whatever the sink still produces */
 1526  if (ost->finished) {
 1527  av_frame_unref(filtered_frame);
 1528  continue;
 1529  }
 1530 
 1531  switch (av_buffersink_get_type(filter)) {
 1532  case AVMEDIA_TYPE_VIDEO:
 1533  if (!ost->frame_aspect_ratio.num)
 1534  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 1535 
 1536  do_video_out(of, ost, filtered_frame);
 1537  break;
 1538  case AVMEDIA_TYPE_AUDIO:
 1539  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
 1540  enc->channels != filtered_frame->channels) {
 1542  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
 1543  break;
 1544  }
 1545  do_audio_out(of, ost, filtered_frame);
 1546  break;
 1547  default:
 1548  // TODO support subtitle filters
 1549  av_assert0(0);
 1550  }
 1551 
 1552  av_frame_unref(filtered_frame);
 1553  }
 1554  }
 1555 
 1556  return 0;
 1557 }
1558 
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, then verbose per-input and per-output stream
 * packet/frame statistics, and a warning when nothing was encoded.
 * `total_size` is the muxed output size in bytes (may be <= 0 if unknown).
 * NOTE(review): the lines that clear `pass1_used` (presumably a two-pass
 * flag check) are missing from this extract; code kept byte-identical.
 */
 1559 static void print_final_stats(int64_t total_size)
 1560 {
 1561  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
 1562  uint64_t subtitle_size = 0;
 1563  uint64_t data_size = 0;
 1564  float percent = -1.0;
 1565  int i, j;
 1566  int pass1_used = 1;
 1567 
 /* accumulate written bytes per media type across all output streams */
 1568  for (i = 0; i < nb_output_streams; i++) {
 1569  OutputStream *ost = output_streams[i];
 1570  switch (ost->enc_ctx->codec_type) {
 1571  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
 1572  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
 1573  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
 1574  default: other_size += ost->data_size; break;
 1575  }
 1576  extra_size += ost->enc_ctx->extradata_size;
 1577  data_size += ost->data_size;
 1580  pass1_used = 0;
 1581  }
 1582 
 /* muxing overhead = container bytes beyond the raw stream payloads */
 1583  if (data_size && total_size>0 && total_size >= data_size)
 1584  percent = 100.0 * (total_size - data_size) / data_size;
 1585 
 1586  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
 1587  video_size / 1024.0,
 1588  audio_size / 1024.0,
 1589  subtitle_size / 1024.0,
 1590  other_size / 1024.0,
 1591  extra_size / 1024.0);
 1592  if (percent >= 0.0)
 1593  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
 1594  else
 1595  av_log(NULL, AV_LOG_INFO, "unknown");
 1596  av_log(NULL, AV_LOG_INFO, "\n");
 1597 
 1598  /* print verbose per-stream stats */
 1599  for (i = 0; i < nb_input_files; i++) {
 1600  InputFile *f = input_files[i];
 /* note: shadows the parameter; this is the per-file demuxed total */
 1601  uint64_t total_packets = 0, total_size = 0;
 1602 
 1603  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
 1604  i, f->ctx->url);
 1605 
 1606  for (j = 0; j < f->nb_streams; j++) {
 1607  InputStream *ist = input_streams[f->ist_index + j];
 1608  enum AVMediaType type = ist->dec_ctx->codec_type;
 1609 
 1610  total_size += ist->data_size;
 1611  total_packets += ist->nb_packets;
 1612 
 1613  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
 1614  i, j, media_type_string(type));
 1615  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
 1616  ist->nb_packets, ist->data_size);
 1617 
 1618  if (ist->decoding_needed) {
 1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
 1620  ist->frames_decoded);
 1621  if (type == AVMEDIA_TYPE_AUDIO)
 1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
 1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1624  }
 1625 
 1626  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1627  }
 1628 
 1629  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
 1630  total_packets, total_size);
 1631  }
 1632 
 1633  for (i = 0; i < nb_output_files; i++) {
 1634  OutputFile *of = output_files[i];
 /* again shadows the parameter; per-output-file muxed total */
 1635  uint64_t total_packets = 0, total_size = 0;
 1636 
 1637  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
 1638  i, of->ctx->url);
 1639 
 1640  for (j = 0; j < of->ctx->nb_streams; j++) {
 1641  OutputStream *ost = output_streams[of->ost_index + j];
 1642  enum AVMediaType type = ost->enc_ctx->codec_type;
 1643 
 1644  total_size += ost->data_size;
 1645  total_packets += ost->packets_written;
 1646 
 1647  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
 1648  i, j, media_type_string(type));
 1649  if (ost->encoding_needed) {
 1650  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
 1651  ost->frames_encoded);
 1652  if (type == AVMEDIA_TYPE_AUDIO)
 1653  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
 1654  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1655  }
 1656 
 1657  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
 1658  ost->packets_written, ost->data_size);
 1659 
 1660  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1661  }
 1662 
 1663  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
 1664  total_packets, total_size);
 1665  }
 1666  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
 1667  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
 1668  if (pass1_used) {
 1669  av_log(NULL, AV_LOG_WARNING, "\n");
 1670  } else {
 1671  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
 1672  }
 1673  }
 1674 }
1675 
/*
 * Build and emit the periodic status line ("frame= ... fps= ... time=
 * ... bitrate= ... speed=") and, in parallel, the machine-readable
 * key=value progress report sent to -progress.  Called from the main
 * loop; rate-limited to one update per 500ms unless is_last_report.
 * On the final call it also adds accumulated drops and prints the final
 * statistics summary.
 * NOTE(review): a few original lines are missing from this extract (the
 * av_bprint_init of `buf`, the copy_ts_first_pts capture, and two log
 * openings); code kept byte-identical.
 */
 1676 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
 1677 {
 1678  AVBPrint buf, buf_script;
 1679  OutputStream *ost;
 1680  AVFormatContext *oc;
 1681  int64_t total_size;
 1682  AVCodecContext *enc;
 1683  int frame_number, vid, i;
 1684  double bitrate;
 1685  double speed;
 1686  int64_t pts = INT64_MIN + 1;
 1687  static int64_t last_time = -1;
 1688  static int64_t qp_histogram[52] is not this line; // (placeholder removed)
 1689  int hours, mins, secs, us;
 1690  const char *hours_sign;
 1691  int ret;
 1692  float t;
 1693 
 1694  if (!print_stats && !is_last_report && !progress_avio)
 1695  return;
 1696 
 /* throttle non-final reports to at most one every 500ms of wall time */
 1697  if (!is_last_report) {
 1698  if (last_time == -1) {
 1699  last_time = cur_time;
 1700  return;
 1701  }
 1702  if ((cur_time - last_time) < 500000)
 1703  return;
 1704  last_time = cur_time;
 1705  }
 1706 
 1707  t = (cur_time-timer_start) / 1000000.0;
 1708 
 1709 
 1710  oc = output_files[0]->ctx;
 1711 
 1712  total_size = avio_size(oc->pb);
 1713  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
 1714  total_size = avio_tell(oc->pb);
 1715 
 1716  vid = 0;
 1718  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
 1719  for (i = 0; i < nb_output_streams; i++) {
 1720  float q = -1;
 1721  ost = output_streams[i];
 1722  enc = ost->enc_ctx;
 1723  if (!ost->stream_copy)
 1724  q = ost->quality / (float) FF_QP2LAMBDA;
 1725 
 /* only the FIRST video stream gets the full frame/fps/psnr report;
  * later video streams just append their quantizer */
 1726  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1727  av_bprintf(&buf, "q=%2.1f ", q);
 1728  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1729  ost->file_index, ost->index, q);
 1730  }
 1731  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1732  float fps;
 1733 
 1734  frame_number = ost->frame_number;
 1735  fps = t > 1 ? frame_number / t : 0;
 1736  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
 1737  frame_number, fps < 9.95, fps, q);
 1738  av_bprintf(&buf_script, "frame=%d\n", frame_number);
 1739  av_bprintf(&buf_script, "fps=%.2f\n", fps);
 1740  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1741  ost->file_index, ost->index, q);
 1742  if (is_last_report)
 1743  av_bprintf(&buf, "L");
 1744  if (qp_hist) {
 1745  int j;
 1746  int qp = lrintf(q);
 1747  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
 1748  qp_histogram[qp]++;
 1749  for (j = 0; j < 32; j++)
 1750  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
 1751  }
 1752 
 1753  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
 1754  int j;
 1755  double error, error_sum = 0;
 1756  double scale, scale_sum = 0;
 1757  double p;
 1758  char type[3] = { 'Y','U','V' };
 1759  av_bprintf(&buf, "PSNR=");
 1760  for (j = 0; j < 3; j++) {
 1761  if (is_last_report) {
 1762  error = enc->error[j];
 1763  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
 1764  } else {
 1765  error = ost->error[j];
 1766  scale = enc->width * enc->height * 255.0 * 255.0;
 1767  }
 /* chroma planes are subsampled: quarter the pixel count */
 1768  if (j)
 1769  scale /= 4;
 1770  error_sum += error;
 1771  scale_sum += scale;
 1772  p = psnr(error / scale);
 1773  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
 1774  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
 1775  ost->file_index, ost->index, type[j] | 32, p);
 1776  }
 1777  p = psnr(error_sum / scale_sum);
 1778  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
 1779  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
 1780  ost->file_index, ost->index, p);
 1781  }
 1782  vid = 1;
 1783  }
 1784  /* compute min output value */
 1785  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
 1786  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
 1787  ost->st->time_base, AV_TIME_BASE_Q));
 1788  if (copy_ts) {
 1789  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
 1792  pts -= copy_ts_first_pts;
 1793  }
 1794  }
 1795 
 1796  if (is_last_report)
 1797  nb_frames_drop += ost->last_dropped;
 1798  }
 1799 
 /* split the max end pts (in AV_TIME_BASE units) into h:m:s.us fields */
 1800  secs = FFABS(pts) / AV_TIME_BASE;
 1801  us = FFABS(pts) % AV_TIME_BASE;
 1802  mins = secs / 60;
 1803  secs %= 60;
 1804  hours = mins / 60;
 1805  mins %= 60;
 1806  hours_sign = (pts < 0) ? "-" : "";
 1807 
 1808  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
 1809  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
 1810 
 1811  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
 1812  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
 1813  if (pts == AV_NOPTS_VALUE) {
 1814  av_bprintf(&buf, "N/A ");
 1815  } else {
 1816  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
 1817  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
 1818  }
 1819 
 1820  if (bitrate < 0) {
 1821  av_bprintf(&buf, "bitrate=N/A");
 1822  av_bprintf(&buf_script, "bitrate=N/A\n");
 1823  }else{
 1824  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
 1825  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
 1826  }
 1827 
 1828  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
 1829  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
 1830  if (pts == AV_NOPTS_VALUE) {
 1831  av_bprintf(&buf_script, "out_time_us=N/A\n");
 1832  av_bprintf(&buf_script, "out_time_ms=N/A\n");
 1833  av_bprintf(&buf_script, "out_time=N/A\n");
 1834  } else {
 1835  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
 1836  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
 1837  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
 1838  hours_sign, hours, mins, secs, us);
 1839  }
 1840 
 1842  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
 1843  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
 1844  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
 1845 
 1846  if (speed < 0) {
 1847  av_bprintf(&buf, " speed=N/A");
 1848  av_bprintf(&buf_script, "speed=N/A\n");
 1849  } else {
 1850  av_bprintf(&buf, " speed=%4.3gx", speed);
 1851  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
 1852  }
 1853 
 /* '\r' keeps interactive reports on one console line; '\n' only at end */
 1854  if (print_stats || is_last_report) {
 1855  const char end = is_last_report ? '\n' : '\r';
 1856  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
 1857  fprintf(stderr, "%s %c", buf.str, end);
 1858  } else
 1859  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
 1860 
 1861  fflush(stderr);
 1862  }
 1863  av_bprint_finalize(&buf, NULL);
 1864 
 1865  if (progress_avio) {
 1866  av_bprintf(&buf_script, "progress=%s\n",
 1867  is_last_report ? "end" : "continue");
 1868  avio_write(progress_avio, buf_script.str,
 1869  FFMIN(buf_script.len, buf_script.size - 1));
 1870  avio_flush(progress_avio);
 1871  av_bprint_finalize(&buf_script, NULL);
 1872  if (is_last_report) {
 1873  if ((ret = avio_closep(&progress_avio)) < 0)
 1875  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
 1876  }
 1877  }
 1878 
 1879  if (is_last_report)
 1880  print_final_stats(total_size);
 1881 }
1882 
/*
 * Body of ifilter_parameters_from_codecpar(): seed an InputFilter's media
 * parameters from the stream's AVCodecParameters when no decoded frame was
 * ever received, so the filtergraph can still be configured.
 * NOTE(review): the signature line is missing from this extract; it takes
 * the InputFilter `ifilter` and the AVCodecParameters `par` — confirm.
 */
 1884 {
 1885  // We never got any input. Set a fake format, which will
 1886  // come from libavformat.
 1887  ifilter->format = par->format;
 1888  ifilter->sample_rate = par->sample_rate;
 1889  ifilter->channels = par->channels;
 1890  ifilter->channel_layout = par->channel_layout;
 1891  ifilter->width = par->width;
 1892  ifilter->height = par->height;
 1893  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
 1894 }
1895 
/*
 * Drain every encoder at end of input: for each output stream that needs
 * encoding, send a NULL frame to enter draining mode and forward all
 * remaining packets to the muxer, writing two-pass stats along the way.
 * Streams whose encoder was never initialized get their filtergraph
 * force-configured first so they can be finished cleanly.
 * NOTE(review): several original lines are missing from this extract
 * (two av_log openings, a filtergraph-input-format guard, the
 * init_output_stream_wrapper call, a media-type skip, and a vstats
 * condition); code kept byte-identical.
 */
 1896 static void flush_encoders(void)
 1897 {
 1898  int i, ret;
 1899 
 1900  for (i = 0; i < nb_output_streams; i++) {
 1901  OutputStream *ost = output_streams[i];
 1902  AVCodecContext *enc = ost->enc_ctx;
 1903  OutputFile *of = output_files[ost->file_index];
 1904 
 1905  if (!ost->encoding_needed)
 1906  continue;
 1907 
 1908  // Try to enable encoding with no input frames.
 1909  // Maybe we should just let encoding fail instead.
 1910  if (!ost->initialized) {
 1911  FilterGraph *fg = ost->filter->graph;
 1912 
 1914  "Finishing stream %d:%d without any data written to it.\n",
 1915  ost->file_index, ost->st->index);
 1916 
 /* configure the never-built graph with codecpar-derived parameters so
  * the stream can at least be closed properly */
 1917  if (ost->filter && !fg->graph) {
 1918  int x;
 1919  for (x = 0; x < fg->nb_inputs; x++) {
 1920  InputFilter *ifilter = fg->inputs[x];
 1921  if (ifilter->format < 0)
 1922  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
 1923  }
 1924 
 1926  continue;
 1927 
 1928  ret = configure_filtergraph(fg);
 1929  if (ret < 0) {
 1930  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
 1931  exit_program(1);
 1932  }
 1933 
 1934  finish_output_stream(ost);
 1935  }
 1936 
 1938  }
 1939 
 1941  continue;
 1942 
 /* draining loop: one iteration per packet still buffered in the encoder */
 1943  for (;;) {
 1944  const char *desc = NULL;
 1945  AVPacket pkt;
 1946  int pkt_size;
 1947 
 1948  switch (enc->codec_type) {
 1949  case AVMEDIA_TYPE_AUDIO:
 1950  desc = "audio";
 1951  break;
 1952  case AVMEDIA_TYPE_VIDEO:
 1953  desc = "video";
 1954  break;
 1955  default:
 1956  av_assert0(0);
 1957  }
 1958 
 1959  av_init_packet(&pkt);
 1960  pkt.data = NULL;
 1961  pkt.size = 0;
 1962 
 1964 
 /* EAGAIN before the flush frame was sent: send NULL to start draining */
 1965  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
 1966  ret = avcodec_send_frame(enc, NULL);
 1967  if (ret < 0) {
 1968  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
 1969  desc,
 1970  av_err2str(ret));
 1971  exit_program(1);
 1972  }
 1973  }
 1974 
 1975  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
 1976  if (ret < 0 && ret != AVERROR_EOF) {
 1977  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
 1978  desc,
 1979  av_err2str(ret));
 1980  exit_program(1);
 1981  }
 1982  if (ost->logfile && enc->stats_out) {
 1983  fprintf(ost->logfile, "%s", enc->stats_out);
 1984  }
 /* EOF: encoder fully drained — emit the EOF marker packet and stop */
 1985  if (ret == AVERROR_EOF) {
 1986  output_packet(of, &pkt, ost, 1);
 1987  break;
 1988  }
 1989  if (ost->finished & MUXER_FINISHED) {
 1990  av_packet_unref(&pkt);
 1991  continue;
 1992  }
 1993  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
 1994  pkt_size = pkt.size;
 1995  output_packet(of, &pkt, ost, 0);
 1997  do_video_stats(ost, pkt_size);
 1998  }
 1999  }
 2000  }
 2001 }
2002 
2003 /*
2004  * Check whether a packet from ist should be written into ost at this time
2005  */
/*
 * Body of check_output_constraints(): decide whether a packet read from
 * input stream `ist` should be written to output stream `ost` right now.
 * Returns 0 when the streams are unrelated, the output is finished, or
 * the packet precedes the requested output start time; 1 otherwise.
 * NOTE(review): the signature line is missing from this extract — it
 * takes (InputStream *ist, OutputStream *ost); confirm upstream.
 */
 2007 {
 2008  OutputFile *of = output_files[ost->file_index];
 /* global index of ist across all input files, for comparison below */
 2009  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
 2010 
 2011  if (ost->source_index != ist_index)
 2012  return 0;
 2013 
 2014  if (ost->finished)
 2015  return 0;
 2016 
 2017  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
 2018  return 0;
 2019 
 2020  return 1;
 2021 }
2022 
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale its timestamps into the muxer time base, honor output/input
 * start and recording-time limits, and forward it to output_packet().
 * A NULL `pkt` signals EOF and flushes the output bitstream filters.
 * NOTE(review): a few original lines are missing from this extract (the
 * copy_initial_nonkeyframes condition tail, the audio duration lookup,
 * and one av_rescale_delta argument line); code kept byte-identical.
 */
 2023 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 2024 {
 2025  OutputFile *of = output_files[ost->file_index];
 2026  InputFile *f = input_files [ist->file_index];
 2027  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
 2028  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
 2029  AVPacket opkt;
 2030 
 2031  // EOF: flush output bitstream filters.
 2032  if (!pkt) {
 2033  av_init_packet(&opkt);
 2034  opkt.data = NULL;
 2035  opkt.size = 0;
 2036  output_packet(of, &opkt, ost, 1);
 2037  return;
 2038  }
 2039 
 /* don't start a copied stream on a non-keyframe */
 2040  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 2042  return;
 2043 
 /* drop packets that precede the effective output start point */
 2044  if (!ost->frame_number && !ost->copy_prior_start) {
 2045  int64_t comp_start = start_time;
 2046  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
 2047  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
 2048  if (pkt->pts == AV_NOPTS_VALUE ?
 2049  ist->pts < comp_start :
 2050  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
 2051  return;
 2052  }
 2053 
 2054  if (of->recording_time != INT64_MAX &&
 2055  ist->pts >= of->recording_time + start_time) {
 2056  close_output_stream(ost);
 2057  return;
 2058  }
 2059 
 2060  if (f->recording_time != INT64_MAX) {
 2061  start_time = f->ctx->start_time;
 2062  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
 2063  start_time += f->start_time;
 2064  if (ist->pts >= f->recording_time + start_time) {
 2065  close_output_stream(ost);
 2066  return;
 2067  }
 2068  }
 2069 
 2070  /* force the input stream PTS */
 2071  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 2072  ost->sync_opts++;
 2073 
 2074  if (av_packet_ref(&opkt, pkt) < 0)
 2075  exit_program(1);
 2076 
 2077  if (pkt->pts != AV_NOPTS_VALUE)
 2078  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
 2079 
 2080  if (pkt->dts == AV_NOPTS_VALUE) {
 2081  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
 2082  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
 /* NOTE(review): `duration` is declared/initialized on a missing line;
  * presumably from av_get_audio_frame_duration() — confirm upstream. */
 2084  if(!duration)
 2085  duration = ist->dec_ctx->frame_size;
 2086  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
 2087  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
 2089  /* dts will be set immediately afterwards to what pts is now */
 2090  opkt.pts = opkt.dts - ost_tb_start_time;
 2091  } else
 2092  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
 2093  opkt.dts -= ost_tb_start_time;
 2094 
 2095  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
 2096 
 2097  output_packet(of, &opkt, ost, 0);
 2098 }
2099 
/*
 * Body of guess_input_channel_layout(): if the decoder reported no channel
 * layout, try to pick a default one (capped by -guess_layout_max channels)
 * and warn about the guess.  Returns 1 when a layout is available (given
 * or guessed), 0 when it could not be determined.
 * NOTE(review): the signature line and the line that assigns the guessed
 * layout (presumably via av_get_default_channel_layout()) are missing
 * from this extract — confirm upstream.
 */
 2101 {
 2102  AVCodecContext *dec = ist->dec_ctx;
 2103 
 2104  if (!dec->channel_layout) {
 2105  char layout_name[256];
 2106 
 /* user-configurable cap on how many channels we are willing to guess for */
 2107  if (dec->channels > ist->guess_layout_max)
 2108  return 0;
 2110  if (!dec->channel_layout)
 2111  return 0;
 2112  av_get_channel_layout_string(layout_name, sizeof(layout_name),
 2113  dec->channels, dec->channel_layout);
 2114  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
 2115  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
 2116  }
 2117  return 1;
 2118 }
2119 
/*
 * Track decode success/failure statistics and enforce -xerror: updates
 * decode_error_stat[], aborts on decode errors when exit_on_error is set,
 * and (for successful decodes) checks the frame for corruption.
 * NOTE(review): the corrupt-frame condition lines are missing from this
 * extract (the av_log below is their body); code kept byte-identical.
 */
 2120 static void check_decode_result(InputStream *ist, int *got_output, int ret)
 2121 {
 /* index 0 counts successes, index 1 counts failures */
 2122  if (*got_output || ret<0)
 2123  decode_error_stat[ret<0] ++;
 2124 
 2125  if (ret < 0 && exit_on_error)
 2126  exit_program(1);
 2127 
 2128  if (*got_output && ist) {
 2131  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
 2132  if (exit_on_error)
 2133  exit_program(1);
 2134  }
 2135  }
 2136 }
2137 
2138 // Filters can be configured only if the formats of all inputs are known.
/*
 * Body of ifilter_has_all_input_formats(): return 1 when every audio/video
 * input of the filtergraph has a known format (format >= 0), 0 otherwise.
 * Filters can only be configured once this holds.
 * NOTE(review): the signature line is missing from this extract — it takes
 * the FilterGraph `fg`; confirm upstream.
 */
 2140 {
 2141  int i;
 2142  for (i = 0; i < fg->nb_inputs; i++) {
 2143  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
 2144  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
 2145  return 0;
 2146  }
 2147  return 1;
 2148 }
2149 
/*
 * Feed one decoded frame into a filtergraph input.  Detects parameter
 * changes (format, sample rate, channels/layout, dimensions, hw frames
 * context) that require reconfiguring the graph; if the graph cannot be
 * (re)built yet because other inputs lack formats, the frame is cloned
 * and queued instead.  Returns 0 on success or a negative AVERROR.
 * NOTE(review): the line that actually pushes the frame into the
 * buffersrc (presumably av_buffersrc_add_frame_flags) is missing from
 * this extract; code kept byte-identical.
 */
 2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
 2151 {
 2152  FilterGraph *fg = ifilter->graph;
 2153  int need_reinit, ret, i;
 2154 
 2155  /* determine if the parameters for this input changed */
 2156  need_reinit = ifilter->format != frame->format;
 2157 
 2158  switch (ifilter->ist->st->codecpar->codec_type) {
 2159  case AVMEDIA_TYPE_AUDIO:
 2160  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
 2161  ifilter->channels != frame->channels ||
 2162  ifilter->channel_layout != frame->channel_layout;
 2163  break;
 2164  case AVMEDIA_TYPE_VIDEO:
 2165  need_reinit |= ifilter->width != frame->width ||
 2166  ifilter->height != frame->height;
 2167  break;
 2168  }
 2169 
 /* -reinit_filter 0: keep the existing graph despite parameter changes */
 2170  if (!ifilter->ist->reinit_filters && fg->graph)
 2171  need_reinit = 0;
 2172 
 /* hardware frames context appearing/disappearing/changing forces reinit */
 2173  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
 2174  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
 2175  need_reinit = 1;
 2176 
 2177  if (need_reinit) {
 2178  ret = ifilter_parameters_from_frame(ifilter, frame);
 2179  if (ret < 0)
 2180  return ret;
 2181  }
 2182 
 2183  /* (re)init the graph if possible, otherwise buffer the frame and return */
 2184  if (need_reinit || !fg->graph) {
 2185  for (i = 0; i < fg->nb_inputs; i++) {
 2186  if (!ifilter_has_all_input_formats(fg)) {
 /* can't configure yet: clone the frame and park it in the FIFO */
 2187  AVFrame *tmp = av_frame_clone(frame);
 2188  if (!tmp)
 2189  return AVERROR(ENOMEM);
 2190  av_frame_unref(frame);
 2191 
 2192  if (!av_fifo_space(ifilter->frame_queue)) {
 2193  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
 2194  if (ret < 0) {
 2195  av_frame_free(&tmp);
 2196  return ret;
 2197  }
 2198  }
 2199  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
 2200  return 0;
 2201  }
 2202  }
 2203 
 /* drain the old graph before tearing it down and rebuilding */
 2204  ret = reap_filters(1);
 2205  if (ret < 0 && ret != AVERROR_EOF) {
 2206  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2207  return ret;
 2208  }
 2209 
 2210  ret = configure_filtergraph(fg);
 2211  if (ret < 0) {
 2212  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
 2213  return ret;
 2214  }
 2215  }
 2216 
 2218  if (ret < 0) {
 2219  if (ret != AVERROR_EOF)
 2220  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2221  return ret;
 2222  }
 2223 
 2224  return 0;
 2225 }
2226 
2227 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2228 {
2229  int ret;
2230 
2231  ifilter->eof = 1;
2232 
2233  if (ifilter->filter) {
2234  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2235  if (ret < 0)
2236  return ret;
2237  } else {
2238  // the filtergraph was never configured
2239  if (ifilter->format < 0)
2240  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2241  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2242  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2243  return AVERROR_INVALIDDATA;
2244  }
2245  }
2246 
2247  return 0;
2248 }
2249 
2250 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2251 // There is the following difference: if you got a frame, you must call
2252 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2253 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2254 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2255 {
2256  int ret;
2257 
2258  *got_frame = 0;
2259 
2260  if (pkt) {
2261  ret = avcodec_send_packet(avctx, pkt);
2262  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2263  // decoded frames with avcodec_receive_frame() until done.
2264  if (ret < 0 && ret != AVERROR_EOF)
2265  return ret;
2266  }
2267 
2268  ret = avcodec_receive_frame(avctx, frame);
2269  if (ret < 0 && ret != AVERROR(EAGAIN))
2270  return ret;
2271  if (ret >= 0)
2272  *got_frame = 1;
2273 
2274  return 0;
2275 }
2276 
/* Distribute one decoded frame to every filter fed by this input stream.
 * All but the last filter receive a reference (filter_frame); the last one
 * consumes the decoded frame itself, avoiding one extra ref.
 * NOTE(review): the signature line (upstream 2277) is missing from this
 * extraction — it presumably takes (InputStream *ist, AVFrame *decoded_frame);
 * confirm against upstream. */
2278 {
2279  int i, ret;
2280  AVFrame *f;
2281 
2282  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2283  for (i = 0; i < ist->nb_filters; i++) {
2284  if (i < ist->nb_filters - 1) {
 /* not the last consumer: hand out a reference, keep the original */
2285  f = ist->filter_frame;
2286  ret = av_frame_ref(f, decoded_frame);
2287  if (ret < 0)
2288  break;
2289  } else
2290  f = decoded_frame;
2291  ret = ifilter_send_frame(ist->filters[i], f);
2292  if (ret == AVERROR_EOF)
2293  ret = 0; /* ignore */
2294  if (ret < 0) {
 /* NOTE(review): the av_log() opening line (upstream 2295) is missing
  * from this extraction — confirm against upstream. */
2296  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2297  break;
2298  }
2299  }
2300  return ret;
2301 }
2302 
/* Decode one audio packet, fix up the frame's pts/timebase, advance the
 * stream's predicted next_pts/next_dts by the decoded duration, and forward
 * the frame to the filter network.
 * NOTE(review): the opening of the signature (upstream 2303, presumably
 * "static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,")
 * is missing from this extraction — confirm against upstream. */
2304  int *decode_failed)
2305 {
 /* NOTE(review): the decoded_frame local declaration (upstream 2306)
  * appears missing from this extraction. */
2307  AVCodecContext *avctx = ist->dec_ctx;
2308  int ret, err = 0;
2309  AVRational decoded_frame_tb;
2310 
 /* lazily allocate the per-stream scratch frames */
2311  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2312  return AVERROR(ENOMEM);
2313  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2314  return AVERROR(ENOMEM);
2315  decoded_frame = ist->decoded_frame;
2316 
2318  ret = decode(avctx, decoded_frame, got_output, pkt);
2319  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2320  if (ret < 0)
2321  *decode_failed = 1;
2322 
2323  if (ret >= 0 && avctx->sample_rate <= 0) {
2324  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2325  ret = AVERROR_INVALIDDATA;
2326  }
2327 
2328  if (ret != AVERROR_EOF)
2329  check_decode_result(ist, got_output, ret);
2330 
2331  if (!*got_output || ret < 0)
2332  return ret;
2333 
2334  ist->samples_decoded += decoded_frame->nb_samples;
2335  ist->frames_decoded++;
2336 
2337  /* increment next_dts to use for the case where the input stream does not
2338  have timestamps or there are multiple frames in the packet */
2339  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2340  avctx->sample_rate;
2341  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2342  avctx->sample_rate;
2343 
 /* pick a pts and its timebase: frame pts > packet pts > stream dts */
2344  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2345  decoded_frame_tb = ist->st->time_base;
2346  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2347  decoded_frame->pts = pkt->pts;
2348  decoded_frame_tb = ist->st->time_base;
2349  }else {
2350  decoded_frame->pts = ist->dts;
2351  decoded_frame_tb = AV_TIME_BASE_Q;
2352  }
 /* rescale to a 1/sample_rate timebase, keeping sub-tick residue in
  * filter_in_rescale_delta_last to avoid drift across frames */
2353  if (decoded_frame->pts != AV_NOPTS_VALUE)
2354  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2355  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2356  (AVRational){1, avctx->sample_rate});
2357  ist->nb_samples = decoded_frame->nb_samples;
2358  err = send_frame_to_filters(ist, decoded_frame);
2359 
2361  av_frame_unref(decoded_frame);
2362  return err < 0 ? err : ret;
2363 }
2364 
/* Decode one video packet (or drain on EOF), maintain the stream's dts
 * bookkeeping, retrieve hwaccel data when needed, derive a best-effort
 * timestamp for the frame, and forward it to the filter network. */
2365 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2366  int *decode_failed)
2367 {
 /* NOTE(review): the decoded_frame local declaration (upstream 2368)
  * appears missing from this extraction. */
2369  int i, ret = 0, err = 0;
2370  int64_t best_effort_timestamp;
2371  int64_t dts = AV_NOPTS_VALUE;
2372  AVPacket avpkt;
2373 
2374  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2375  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2376  // skip the packet.
2377  if (!eof && pkt && pkt->size == 0)
2378  return 0;
2379 
 /* lazily allocate the per-stream scratch frames */
2380  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2381  return AVERROR(ENOMEM);
2382  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2383  return AVERROR(ENOMEM);
2384  decoded_frame = ist->decoded_frame;
2385  if (ist->dts != AV_NOPTS_VALUE)
2386  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2387  if (pkt) {
2388  avpkt = *pkt;
2389  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2390  }
2391 
2392  // The old code used to set dts on the drain packet, which does not work
2393  // with the new API anymore.
2394  if (eof) {
 /* remember the dts so draining can still produce timestamps */
2395  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2396  if (!new)
2397  return AVERROR(ENOMEM);
2398  ist->dts_buffer = new;
2399  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2400  }
2401 
2403  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2404  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2405  if (ret < 0)
2406  *decode_failed = 1;
2407 
2408  // The following line may be required in some cases where there is no parser
2409  // or the parser does not has_b_frames correctly
2410  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2411  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2412  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2413  } else
 /* NOTE(review): the av_log() opening line (upstream 2414) is missing
  * from this extraction — confirm against upstream. */
2415  "video_delay is larger in decoder than demuxer %d > %d.\n"
2416  "If you want to help, upload a sample "
2417  "of this file to https://streams.videolan.org/upload/ "
2418  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2419  ist->dec_ctx->has_b_frames,
2420  ist->st->codecpar->video_delay);
2421  }
2422 
2423  if (ret != AVERROR_EOF)
2424  check_decode_result(ist, got_output, ret);
2425 
 /* debug aid: flag decoder-context vs frame parameter mismatches */
2426  if (*got_output && ret >= 0) {
2427  if (ist->dec_ctx->width != decoded_frame->width ||
2428  ist->dec_ctx->height != decoded_frame->height ||
2429  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2430  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2431  decoded_frame->width,
2432  decoded_frame->height,
2433  decoded_frame->format,
2434  ist->dec_ctx->width,
2435  ist->dec_ctx->height,
2436  ist->dec_ctx->pix_fmt);
2437  }
2438  }
2439 
2440  if (!*got_output || ret < 0)
2441  return ret;
2442 
2443  if(ist->top_field_first>=0)
2444  decoded_frame->top_field_first = ist->top_field_first;
2445 
2446  ist->frames_decoded++;
2447 
 /* download the frame from hw memory when a retrieve callback is set */
2448  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2449  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2450  if (err < 0)
2451  goto fail;
2452  }
2453  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2454 
2455  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2456  *duration_pts = decoded_frame->pkt_duration;
2457 
 /* forced CFR input: synthesize timestamps from a frame counter */
2458  if (ist->framerate.num)
2459  best_effort_timestamp = ist->cfr_next_pts++;
2460 
 /* while draining, fall back to the dts values queued at EOF */
2461  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2462  best_effort_timestamp = ist->dts_buffer[0];
2463 
2464  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2465  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2466  ist->nb_dts_buffer--;
2467  }
2468 
2469  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2470  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2471 
2472  if (ts != AV_NOPTS_VALUE)
2473  ist->next_pts = ist->pts = ts;
2474  }
2475 
2476  if (debug_ts) {
2477  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2478  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2479  ist->st->index, av_ts2str(decoded_frame->pts),
2480  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2481  best_effort_timestamp,
2482  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2483  decoded_frame->key_frame, decoded_frame->pict_type,
2484  ist->st->time_base.num, ist->st->time_base.den);
2485  }
2486 
2487  if (ist->st->sample_aspect_ratio.num)
2488  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2489 
2490  err = send_frame_to_filters(ist, decoded_frame);
2491 
2492 fail:
2494  av_frame_unref(decoded_frame);
2495  return err < 0 ? err : ret;
2496 }
2497 
/* Decode one subtitle packet, optionally fix overlapping subtitle durations
 * (-fix_sub_duration), feed sub2video filters or queue the subtitle for a
 * not-yet-initialized sub2video, and encode it to all matching outputs.
 * NOTE(review): the opening of the signature (upstream 2498, presumably
 * "static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,")
 * and the AVSubtitle local declaration (upstream 2501) are missing from this
 * extraction — confirm against upstream. */
2499  int *decode_failed)
2500 {
2502  int free_sub = 1;
2503  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2504  &subtitle, got_output, pkt);
2505 
2506  check_decode_result(NULL, got_output, ret);
2507 
2508  if (ret < 0 || !*got_output) {
2509  *decode_failed = 1;
 /* an empty packet means flush: propagate EOF into sub2video */
2510  if (!pkt->size)
2511  sub2video_flush(ist);
2512  return ret;
2513  }
2514 
2515  if (ist->fix_sub_duration) {
 /* clamp the previous subtitle so it ends when this one starts */
2516  int end = 1;
2517  if (ist->prev_sub.got_output) {
2518  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2519  1000, AV_TIME_BASE);
2520  if (end < ist->prev_sub.subtitle.end_display_time) {
2521  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2522  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): lines upstream 2523/2525 (remaining av_log args and
  * the end_display_time assignment) are missing from this extraction. */
2524  end <= 0 ? ", dropping it" : "");
2526  }
2527  }
 /* output the *previous* subtitle now that its duration is final */
2528  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2529  FFSWAP(int, ret, ist->prev_sub.ret);
2530  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2531  if (end <= 0)
2532  goto out;
2533  }
2534 
2535  if (!*got_output)
2536  return ret;
2537 
2538  if (ist->sub2video.frame) {
2539  sub2video_update(ist, INT64_MIN, &subtitle);
2540  } else if (ist->nb_filters) {
 /* sub2video not configured yet: queue the subtitle for later */
2541  if (!ist->sub2video.sub_queue)
2542  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2543  if (!ist->sub2video.sub_queue)
2544  exit_program(1);
2545  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): the fifo grow call (upstream 2546) is missing from
  * this extraction — confirm against upstream. */
2547  if (ret < 0)
2548  exit_program(1);
2549  }
2550  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2551  free_sub = 0;
2552  }
2553 
2554  if (!subtitle.num_rects)
2555  goto out;
2556 
2557  ist->frames_decoded++;
2558 
2559  for (i = 0; i < nb_output_streams; i++) {
2560  OutputStream *ost = output_streams[i];
2561 
2562  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2563  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2564  continue;
2565 
2566  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2567  }
2568 
2569 out:
2570  if (free_sub)
2571  avsubtitle_free(&subtitle);
2572  return ret;
2573 }
2574 
/* Propagate EOF to every filter input attached to this stream, using the
 * stream's current pts (rescaled into the stream timebase) as the EOF
 * timestamp.
 * NOTE(review): the signature line (upstream 2575, presumably
 * "static int send_filter_eof(InputStream *ist)") and the rounding-flags
 * continuation of av_rescale_q_rnd (upstream 2580) are missing from this
 * extraction — confirm against upstream. */
2576 {
2577  int i, ret;
2578  /* TODO keep pts also in stream time base to avoid converting back */
2579  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2581 
2582  for (i = 0; i < ist->nb_filters; i++) {
2583  ret = ifilter_send_eof(ist->filters[i], pts);
2584  if (ret < 0)
2585  return ret;
2586  }
2587  return 0;
2588 }
2589 
2590 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the stream's pts/dts prediction,
 * runs the appropriate decoder (audio/video/subtitle) in a loop so draining
 * can yield multiple frames, sends filter EOF once the decoder is exhausted,
 * and handles the stream-copy timestamp bookkeeping and muxing.
 * Returns 0 when EOF was reached for this stream, non-zero otherwise. */
2591 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2592 {
2593  int ret = 0, i;
2594  int repeating = 0;
2595  int eof_reached = 0;
2596 
2597  AVPacket avpkt;
 /* seed dts/pts on the very first packet of the stream */
2598  if (!ist->saw_first_ts) {
2599  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2600  ist->pts = 0;
2601  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2602  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2603  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2604  }
2605  ist->saw_first_ts = 1;
2606  }
2607 
2608  if (ist->next_dts == AV_NOPTS_VALUE)
2609  ist->next_dts = ist->dts;
2610  if (ist->next_pts == AV_NOPTS_VALUE)
2611  ist->next_pts = ist->pts;
2612 
2613  if (!pkt) {
2614  /* EOF handling */
2615  av_init_packet(&avpkt);
2616  avpkt.data = NULL;
2617  avpkt.size = 0;
2618  } else {
2619  avpkt = *pkt;
2620  }
2621 
 /* an explicit packet dts overrides the prediction */
2622  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2623  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2624  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2625  ist->next_pts = ist->pts = ist->dts;
2626  }
2627 
2628  // while we have more to decode or while the decoder did output something on EOF
2629  while (ist->decoding_needed) {
2630  int64_t duration_dts = 0;
2631  int64_t duration_pts = 0;
2632  int got_output = 0;
2633  int decode_failed = 0;
2634 
2635  ist->pts = ist->next_pts;
2636  ist->dts = ist->next_dts;
2637 
2638  switch (ist->dec_ctx->codec_type) {
2639  case AVMEDIA_TYPE_AUDIO:
2640  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2641  &decode_failed);
2642  break;
2643  case AVMEDIA_TYPE_VIDEO:
2644  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2645  &decode_failed);
2646  if (!repeating || !pkt || got_output) {
2647  if (pkt && pkt->duration) {
2648  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): the ticks computation (upstream 2650) and the
  * divisor line of this expression (upstream 2653) are missing
  * from this extraction — confirm against upstream. */
2651  duration_dts = ((int64_t)AV_TIME_BASE *
2652  ist->dec_ctx->framerate.den * ticks) /
2654  }
2655 
2656  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2657  ist->next_dts += duration_dts;
2658  }else
2659  ist->next_dts = AV_NOPTS_VALUE;
2660  }
2661 
 /* prefer the decoded frame's own duration over the dts estimate */
2662  if (got_output) {
2663  if (duration_pts > 0) {
2664  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2665  } else {
2666  ist->next_pts += duration_dts;
2667  }
2668  }
2669  break;
2670  case AVMEDIA_TYPE_SUBTITLE:
2671  if (repeating)
2672  break;
2673  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2674  if (!pkt && ret >= 0)
2675  ret = AVERROR_EOF;
2676  break;
2677  default:
2678  return -1;
2679  }
2680 
2681  if (ret == AVERROR_EOF) {
2682  eof_reached = 1;
2683  break;
2684  }
2685 
2686  if (ret < 0) {
2687  if (decode_failed) {
2688  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2689  ist->file_index, ist->st->index, av_err2str(ret));
2690  } else {
2691  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2692  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2693  }
2694  if (!decode_failed || exit_on_error)
2695  exit_program(1);
2696  break;
2697  }
2698 
2699  if (got_output)
2700  ist->got_output = 1;
2701 
2702  if (!got_output)
2703  break;
2704 
2705  // During draining, we might get multiple output frames in this loop.
2706  // ffmpeg.c does not drain the filter chain on configuration changes,
2707  // which means if we send multiple frames at once to the filters, and
2708  // one of those frames changes configuration, the buffered frames will
2709  // be lost. This can upset certain FATE tests.
2710  // Decode only 1 frame per call on EOF to appease these FATE tests.
2711  // The ideal solution would be to rewrite decoding to use the new
2712  // decoding API in a better way.
2713  if (!pkt)
2714  break;
2715 
2716  repeating = 1;
2717  }
2718 
2719  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2720  /* except when looping we need to flush but not to send an EOF */
2721  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2722  int ret = send_filter_eof(ist);
2723  if (ret < 0) {
2724  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2725  exit_program(1);
2726  }
2727  }
2728 
2729  /* handle stream copy */
2730  if (!ist->decoding_needed && pkt) {
2731  ist->dts = ist->next_dts;
2732  switch (ist->dec_ctx->codec_type) {
2733  case AVMEDIA_TYPE_AUDIO:
2734  av_assert1(pkt->duration >= 0);
2735  if (ist->dec_ctx->sample_rate) {
2736  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2737  ist->dec_ctx->sample_rate;
2738  } else {
2739  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740  }
2741  break;
2742  case AVMEDIA_TYPE_VIDEO:
2743  if (ist->framerate.num) {
2744  // TODO: Remove work-around for c99-to-c89 issue 7
2745  AVRational time_base_q = AV_TIME_BASE_Q;
2746  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2747  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2748  } else if (pkt->duration) {
2749  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2750  } else if(ist->dec_ctx->framerate.num != 0) {
2751  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2752  ist->next_dts += ((int64_t)AV_TIME_BASE *
2753  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): the divisor line of this expression (upstream 2754)
  * is missing from this extraction — confirm against upstream. */
2755  }
2756  break;
2757  }
2758  ist->pts = ist->dts;
2759  ist->next_pts = ist->next_dts;
2760  }
 /* forward the packet to all stream-copy outputs fed by this input */
2761  for (i = 0; i < nb_output_streams; i++) {
2762  OutputStream *ost = output_streams[i];
2763 
2764  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2765  continue;
2766 
2767  do_streamcopy(ist, ost, pkt);
2768  }
2769 
2770  return !eof_reached;
2771 }
2772 
/* Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Does nothing until every output
 * file's header has been written. */
2773 static void print_sdp(void)
2774 {
2775  char sdp[16384];
2776  int i;
2777  int j;
2778  AVIOContext *sdp_pb;
2779  AVFormatContext **avc;
2780 
 /* wait until all muxers have written their headers */
2781  for (i = 0; i < nb_output_files; i++) {
2782  if (!output_files[i]->header_written)
2783  return;
2784  }
2785 
2786  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2787  if (!avc)
2788  exit_program(1);
 /* collect only the RTP outputs; j counts them */
2789  for (i = 0, j = 0; i < nb_output_files; i++) {
2790  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2791  avc[j] = output_files[i]->ctx;
2792  j++;
2793  }
2794  }
2795 
2796  if (!j)
2797  goto fail;
2798 
2799  av_sdp_create(avc, j, sdp, sizeof(sdp));
2800 
2801  if (!sdp_filename) {
2802  printf("SDP:\n%s\n", sdp);
2803  fflush(stdout);
2804  } else {
2805  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2806  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2807  } else {
2808  avio_print(sdp_pb, sdp);
2809  avio_closep(&sdp_pb);
 /* NOTE(review): one line (upstream 2810) is missing from this
  * extraction here — confirm against upstream. */
2811  }
2812  }
2813 
2814 fail:
2815  av_freep(&avc);
2816 }
2817 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first hwaccel format we can initialize (matching the
 * user's -hwaccel selection), falling back to the first non-hwaccel format.
 * NOTE(review): the signature line (upstream 2818, an AVCodecContext* and a
 * pix_fmts list per the get_format convention) is missing from this
 * extraction — confirm against upstream. */
2819 {
2820  InputStream *ist = s->opaque;
2821  const enum AVPixelFormat *p;
2822  int ret;
2823 
2824  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 /* NOTE(review): the pixel-format descriptor lookup (upstream 2825)
  * is missing from this extraction. */
2826  const AVCodecHWConfig *config = NULL;
2827  int i;
2828 
 /* formats are ordered by decoder preference; the first software
  * format terminates the hwaccel search */
2829  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2830  break;
2831 
2832  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2833  ist->hwaccel_id == HWACCEL_AUTO) {
 /* probe the decoder's advertised hw configs for this pix_fmt */
2834  for (i = 0;; i++) {
2835  config = avcodec_get_hw_config(s->codec, i);
2836  if (!config)
2837  break;
2838  if (!(config->methods &
 /* NOTE(review): the method flag tested here (upstream 2839) is
  * missing from this extraction. */
2840  continue;
2841  if (config->pix_fmt == *p)
2842  break;
2843  }
2844  }
2845  if (config) {
2846  if (config->device_type != ist->hwaccel_device_type) {
2847  // Different hwaccel offered, ignore.
2848  continue;
2849  }
2850 
2851  ret = hwaccel_decode_init(s);
2852  if (ret < 0) {
 /* explicit -hwaccel request that cannot init is fatal */
2853  if (ist->hwaccel_id == HWACCEL_GENERIC) {
 /* NOTE(review): the av_log() opening and name argument lines
  * (upstream 2854/2857) are missing from this extraction. */
2855  "%s hwaccel requested for input stream #%d:%d, "
2856  "but cannot be initialized.\n",
2858  ist->file_index, ist->st->index);
2859  return AV_PIX_FMT_NONE;
2860  }
2861  continue;
2862  }
2863  } else {
 /* fall back to the legacy hwaccels[] table */
2864  const HWAccel *hwaccel = NULL;
2865  int i;
2866  for (i = 0; hwaccels[i].name; i++) {
2867  if (hwaccels[i].pix_fmt == *p) {
2868  hwaccel = &hwaccels[i];
2869  break;
2870  }
2871  }
2872  if (!hwaccel) {
2873  // No hwaccel supporting this pixfmt.
2874  continue;
2875  }
2876  if (hwaccel->id != ist->hwaccel_id) {
2877  // Does not match requested hwaccel.
2878  continue;
2879  }
2880 
2881  ret = hwaccel->init(s);
2882  if (ret < 0) {
 /* NOTE(review): the av_log() opening line (upstream 2883) is
  * missing from this extraction. */
2884  "%s hwaccel requested for input stream #%d:%d, "
2885  "but cannot be initialized.\n", hwaccel->name,
2886  ist->file_index, ist->st->index);
2887  return AV_PIX_FMT_NONE;
2888  }
2889  }
2890 
2891  if (ist->hw_frames_ctx) {
 /* NOTE(review): the hw_frames_ctx ref assignment (upstream 2892) is
  * missing from this extraction. */
2893  if (!s->hw_frames_ctx)
2894  return AV_PIX_FMT_NONE;
2895  }
2896 
2897  ist->hwaccel_pix_fmt = *p;
2898  break;
2899  }
2900 
2901  return *p;
2902 }
2903 
2904 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2905 {
2906  InputStream *ist = s->opaque;
2907 
2908  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2909  return ist->hwaccel_get_buffer(s, frame, flags);
2910 
2911  return avcodec_default_get_buffer2(s, frame, flags);
2912 }
2913 
/* Open the decoder for one input stream (when decoding is needed), wiring
 * up the hwaccel callbacks, decoder options, and device setup. On failure
 * a human-readable message is written into `error` and a negative AVERROR
 * code is returned. */
2914 static int init_input_stream(int ist_index, char *error, int error_len)
2915 {
2916  int ret;
2917  InputStream *ist = input_streams[ist_index];
2918 
2919  if (ist->decoding_needed) {
2920  AVCodec *codec = ist->dec;
2921  if (!codec) {
2922  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2923  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2924  return AVERROR(EINVAL);
2925  }
2926 
 /* hook the hwaccel format/buffer callbacks into the decoder context */
2927  ist->dec_ctx->opaque = ist;
2928  ist->dec_ctx->get_format = get_format;
2929  ist->dec_ctx->get_buffer2 = get_buffer;
2930 #if LIBAVCODEC_VERSION_MAJOR < 60
2931  ist->dec_ctx->thread_safe_callbacks = 1;
2932 #endif
2933 
2934  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2935  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2936  (ist->decoding_needed & DECODING_FOR_OST)) {
2937  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): one line (upstream 2938, presumably the condition
  * guarding the warning below) is missing from this extraction. */
2939  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2940  }
2941 
2942  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2943 
2944  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2945  * audio, and video decoders such as cuvid or mediacodec */
2946  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2947 
2948  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2949  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2950  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
 /* NOTE(review): the attached-pic condition line (upstream 2951) is
  * missing from this extraction. */
2952  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2953 
2954  ret = hw_device_setup_for_decode(ist);
2955  if (ret < 0) {
2956  snprintf(error, error_len, "Device setup failed for "
2957  "decoder on input stream #%d:%d : %s",
2958  ist->file_index, ist->st->index, av_err2str(ret));
2959  return ret;
2960  }
2961 
2962  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2963  if (ret == AVERROR_EXPERIMENTAL)
2964  abort_codec_experimental(codec, 0);
2965 
2966  snprintf(error, error_len,
2967  "Error while opening decoder for input stream "
2968  "#%d:%d : %s",
2969  ist->file_index, ist->st->index, av_err2str(ret));
2970  return ret;
2971  }
 /* NOTE(review): one line (upstream 2972) is missing from this
  * extraction here — confirm against upstream. */
2973  }
2974 
2975  ist->next_pts = AV_NOPTS_VALUE;
2976  ist->next_dts = AV_NOPTS_VALUE;
2977 
2978  return 0;
2979 }
2980 
/* Return the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. it is fed by a filtergraph).
 * NOTE(review): the signature line (upstream 2981, presumably
 * taking an OutputStream* and returning InputStream*) is missing from this
 * extraction — confirm against upstream. */
2982 {
2983  if (ost->source_index >= 0)
2984  return input_streams[ost->source_index];
2985  return NULL;
2986 }
2987 
/**
 * qsort()-style comparator for int64_t values.
 * Yields -1, 0 or +1 via the branch-free sign idiom, which cannot overflow
 * (unlike a plain subtraction of the two values).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2992 
2993 /* open the muxer when all the streams are initialized */
/* Writes the output file header once every stream of the file is
 * initialized, prints the SDP if requested, and flushes packets queued in
 * each stream's muxing queue while the muxer was not yet open.
 * NOTE(review): the signature line (upstream 2994, taking the OutputFile and
 * its file_index) is missing from this extraction — confirm upstream. */
2995 {
2996  int ret, i;
2997 
 /* bail out until every stream of this file is ready */
2998  for (i = 0; i < of->ctx->nb_streams; i++) {
2999  OutputStream *ost = output_streams[of->ost_index + i];
3000  if (!ost->initialized)
3001  return 0;
3002  }
3003 
3004  of->ctx->interrupt_callback = int_cb;
3005 
3006  ret = avformat_write_header(of->ctx, &of->opts);
3007  if (ret < 0) {
 /* NOTE(review): the av_log() opening line (upstream 3008) is missing
  * from this extraction — confirm against upstream. */
3009  "Could not write header for output file #%d "
3010  "(incorrect codec parameters ?): %s\n",
3011  file_index, av_err2str(ret));
3012  return ret;
3013  }
3014  //assert_avoptions(of->opts);
3015  of->header_written = 1;
3016 
3017  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3018 
3019  if (sdp_filename || want_sdp)
3020  print_sdp();
3021 
3022  /* flush the muxing queues */
3023  for (i = 0; i < of->ctx->nb_streams; i++) {
3024  OutputStream *ost = output_streams[of->ost_index + i];
3025 
3026  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3027  if (!av_fifo_size(ost->muxing_queue))
3028  ost->mux_timebase = ost->st->time_base;
3029 
3030  while (av_fifo_size(ost->muxing_queue)) {
3031  AVPacket pkt;
3032  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3033  ost->muxing_queue_data_size -= pkt.size;
3034  write_packet(of, &pkt, ost, 1);
3035  }
3036  }
3037 
3038  return 0;
3039 }
3040 
/* Initialize the output stream's bitstream filter (if any): feed it the
 * stream's codec parameters and timebase, then copy the filter's output
 * parameters/timebase back onto the stream.
 * NOTE(review): the signature line (upstream 3041, presumably taking an
 * OutputStream*) is missing from this extraction — confirm upstream. */
3042 {
3043  AVBSFContext *ctx = ost->bsf_ctx;
3044  int ret;
3045 
 /* nothing to do when no bitstream filter is attached */
3046  if (!ctx)
3047  return 0;
3048 
3049  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3050  if (ret < 0)
3051  return ret;
3052 
3053  ctx->time_base_in = ost->st->time_base;
3054 
3055  ret = av_bsf_init(ctx);
3056  if (ret < 0) {
3057  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3058  ctx->filter->name);
3059  return ret;
3060  }
3061 
 /* the bsf may have rewritten extradata/parameters and the timebase */
3062  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3063  if (ret < 0)
3064  return ret;
3065  ost->st->time_base = ctx->time_base_out;
3066 
3067  return 0;
3068 }
3069 
3071 {
3072  OutputFile *of = output_files[ost->file_index];
3073  InputStream *ist = get_input_stream(ost);
3074  AVCodecParameters *par_dst = ost->st->codecpar;
3075  AVCodecParameters *par_src = ost->ref_par;
3076  AVRational sar;
3077  int i, ret;
3078  uint32_t codec_tag = par_dst->codec_tag;
3079 
3080  av_assert0(ist && !ost->filter);
3081 
3082  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3083  if (ret >= 0)
3084  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3085  if (ret < 0) {
3087  "Error setting up codec context options.\n");
3088  return ret;
3089  }
3090 
3091  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3092  if (ret < 0) {
3094  "Error getting reference codec parameters.\n");
3095  return ret;
3096  }
3097 
3098  if (!codec_tag) {
3099  unsigned int codec_tag_tmp;
3100  if (!of->ctx->oformat->codec_tag ||
3101  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3102  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3103  codec_tag = par_src->codec_tag;
3104  }
3105 
3106  ret = avcodec_parameters_copy(par_dst, par_src);
3107  if (ret < 0)
3108  return ret;
3109 
3110  par_dst->codec_tag = codec_tag;
3111 
3112  if (!ost->frame_rate.num)
3113  ost->frame_rate = ist->framerate;
3114  ost->st->avg_frame_rate = ost->frame_rate;
3115 
3117  if (ret < 0)
3118  return ret;
3119 
3120  // copy timebase while removing common factors
3121  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3123 
3124  // copy estimated duration as a hint to the muxer
3125  if (ost->st->duration <= 0 && ist->st->duration > 0)
3126  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3127 
3128  // copy disposition
3129  ost->st->disposition = ist->st->disposition;
3130 
3131  if (ist->st->nb_side_data) {
3132  for (i = 0; i < ist->st->nb_side_data; i++) {
3133  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3134  uint8_t *dst_data;
3135 
3136  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3137  if (!dst_data)
3138  return AVERROR(ENOMEM);
3139  memcpy(dst_data, sd_src->data, sd_src->size);
3140  }
3141  }
3142 
3143  if (ost->rotate_overridden) {
3145  sizeof(int32_t) * 9);
3146  if (sd)
3148  }
3149 
3150  switch (par_dst->codec_type) {
3151  case AVMEDIA_TYPE_AUDIO:
3152  if (audio_volume != 256) {
3153  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3154  exit_program(1);
3155  }
3156  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3157  par_dst->block_align= 0;
3158  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3159  par_dst->block_align= 0;
3160  break;
3161  case AVMEDIA_TYPE_VIDEO:
3162  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3163  sar =
3165  (AVRational){ par_dst->height, par_dst->width });
3166  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3167  "with stream copy may produce invalid files\n");
3168  }
3169  else if (ist->st->sample_aspect_ratio.num)
3170  sar = ist->st->sample_aspect_ratio;
3171  else
3172  sar = par_src->sample_aspect_ratio;
3173  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3174  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3175  ost->st->r_frame_rate = ist->st->r_frame_rate;
3176  break;
3177  }
3178 
3179  ost->mux_timebase = ist->st->time_base;
3180 
3181  return 0;
3182 }
3183 
/* Set the stream's "encoder" metadata tag to the libavcodec ident plus the
 * encoder name, unless the user already set one or bitexact output was
 * requested (then only "Lavc <name>" is used).
 * NOTE(review): the signature line (upstream 3184, presumably taking the
 * OutputFile and OutputStream) is missing from this extraction — confirm
 * against upstream. */
3185 {
3186  AVDictionaryEntry *e;
3187 
3188  uint8_t *encoder_string;
3189  int encoder_string_len;
3190  int format_flags = 0;
3191  int codec_flags = ost->enc_ctx->flags;
3192 
 /* user-provided encoder tag wins */
3193  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3194  return;
3195 
 /* evaluate -fflags / -flags option strings to detect +bitexact */
3196  e = av_dict_get(of->opts, "fflags", NULL, 0);
3197  if (e) {
3198  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3199  if (!o)
3200  return;
3201  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3202  }
3203  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3204  if (e) {
3205  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3206  if (!o)
3207  return;
3208  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3209  }
3210 
3211  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3212  encoder_string = av_mallocz(encoder_string_len);
3213  if (!encoder_string)
3214  exit_program(1);
3215 
3216  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3217  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3218  else
3219  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3220  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3221  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): the closing flags argument line (upstream 3222) is
  * missing from this extraction — confirm against upstream. */
3223 }
3224 
/* Parse the -force_key_frames argument: a comma-separated list of timestamps
 * and/or "chapters[+offset]" entries. Fills ost->forced_kf_pts (sorted, in
 * avctx->time_base units) and ost->forced_kf_count. Mutates 'kf' in place. */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* count the comma-separated entries */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;  /* NUL-terminate the current entry (in place) */

        if (!memcmp(p, "chapters", 8)) {
            /* "chapters[+offset]": one forced keyframe per chapter start */
            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* grow by nb_chapters-1: the "chapters" entry itself already
             * accounted for one slot; guard the int addition first */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                /* NOTE(review): the av_log(NULL, AV_LOG_FATAL, ...) call line
                 * is missing from this extract */
                "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional "+offset" suffix after "chapters" */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {
            /* plain timestamp entry */
            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    /* keep sorted so the encoding loop can scan linearly */
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3287 
3288 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3289 {
3290  InputStream *ist = get_input_stream(ost);
3291  AVCodecContext *enc_ctx = ost->enc_ctx;
3292  AVFormatContext *oc;
3293 
3294  if (ost->enc_timebase.num > 0) {
3295  enc_ctx->time_base = ost->enc_timebase;
3296  return;
3297  }
3298 
3299  if (ost->enc_timebase.num < 0) {
3300  if (ist) {
3301  enc_ctx->time_base = ist->st->time_base;
3302  return;
3303  }
3304 
3305  oc = output_files[ost->file_index]->ctx;
3306  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3307  }
3308 
3309  enc_ctx->time_base = default_time_base;
3310 }
3311 
{
    /* Configure the encoder context of 'ost' before avcodec_open2():
     * frame rate, time base, dimensions/pixel format (video), sample rate
     * (audio), color properties and forced keyframes.
     * NOTE(review): the signature line is missing from this extract; the
     * body also uses 'frame' (an AVFrame) and a 'dec_ctx' declaration whose
     * line was dropped by the extraction. */
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no direct input stream: look for a sibling stream of the same type
         * in the output file to inherit a disposition from */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
            /* NOTE(review): condition continuation and body lines are missing
             * from this extract */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame-rate fallback chain: -r option, filter graph (dropped line),
         * input -framerate, input r_frame_rate, finally a 25 fps default */
        if (!ost->frame_rate.num)
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            "No information "
            "about the input framerate is available. Falling "
            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
            "if you want a different framerate.\n",
            ost->file_index, ost->index);
        }

        /* snap to the nearest frame rate the encoder supports (unless -force_fps) */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* cap raw sample depth at what the sample format can hold */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        /* geometry and pixel format come from the buffersink at the end of
         * the filter graph */
        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* propagate color metadata from the first filtered frame, if any */
        if (frame) {
            enc_ctx->color_range            = frame->color_range;
            enc_ctx->color_primaries        = frame->color_primaries;
            enc_ctx->color_trc              = frame->color_trc;
            enc_ctx->colorspace             = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* any geometry/format change relative to the decoder implies the
         * stream went through filtering */
        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
        }

        /* -top option: 0 = bottom field first, 1 = top field first */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (frame) {
            ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the av_expr_parse call line is missing from
                 * this extract */
                if (ret < 0) {
                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* inherit canvas size from the input if the encoder has none */
        if (!enc_ctx->width) {
            enc_ctx->width  = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3487 
/* Fully initialize one output stream: open its encoder (when encoding) or
 * copy parameters (stream copy), apply user dispositions, set up bitstream
 * filters, and possibly trigger writing the output file header.
 * Returns 0 on success or a negative AVERROR; 'error'/'error_len' receive a
 * human-readable message on failure. */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the allocation line for enc_ctx->subtitle_header
             * is missing from this extract */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
        }
        /* default to automatic thread count unless the user chose one */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* audio encoders without codec defaults get a 128k bitrate default */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* subtitle transcoding is only supported text->text or bitmap->bitmap */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
            AVCodecDescriptor const *output_descriptor =
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders need the buffersink configured
         * accordingly (the set call line is missing from this extract) */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                   " It takes bits/s as argument, not kbits/s\n");

        if (ret < 0) {
            "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* propagate encoder-produced global side data to the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* autorotation is handled by filters, so neutralize the
                     * display matrix rotation on the output */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* local option table so -disposition strings can be evaluated with
         * the generic AVOption flag parser */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* write the file header once all its streams are initialized */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3678 
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after the initial stream discovery of input file 'input_index'. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* already warned about this stream index (or a higher one) */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): the media-type string argument line is missing
            * from this extract */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3693 
/* One-time setup before the main transcode loop: resolve filter-fed output
 * stream sources, start framerate emulation clocks, open decoders, init
 * stream-copy/subtitle/data output streams, discard unused programs, write
 * headers of streamless files, and finally dump the stream mapping.
 * Returns 0 on success, a negative AVERROR otherwise. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* for simple (1-input) filtergraphs, point the output stream back at the
     * input stream feeding the graph */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* on failure, close every encoder already set up, then fall
             * through to print the mapping and the error */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* keep the program if any of its streams is mapped */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            /* print "codec (decoder) -> codec (encoder)", collapsing the
             * decoder/encoder name to "native" when it equals the codec name */
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): one line is missing from this extract here — presumably
     * the statement marking transcode init as done. */

    return 0;
}
3874 
3875 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3876 static int need_output(void)
3877 {
3878  int i;
3879 
3880  for (i = 0; i < nb_output_streams; i++) {
3881  OutputStream *ost = output_streams[i];
3882  OutputFile *of = output_files[ost->file_index];
3883  AVFormatContext *os = output_files[ost->file_index]->ctx;
3884 
3885  if (ost->finished ||
3886  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3887  continue;
3888  if (ost->frame_number >= ost->max_frames) {
3889  int j;
3890  for (j = 0; j < of->ctx->nb_streams; j++)
3891  close_output_stream(output_streams[of->ost_index + j]);
3892  continue;
3893  }
3894 
3895  return 1;
3896  }
3897 
3898  return 0;
3899 }
3900 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
{
    /* NOTE(review): the signature line is missing from this extract; this is
     * the body of the stream chooser (returns an OutputStream*). */
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* position of this stream in AV_TIME_BASE units; an unset cur_dts
         * sorts first (INT64_MIN) so the stream gets served */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            /* NOTE(review): the av_log call line is missing from this extract */
            "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
            ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* uninitialized streams are served immediately so they can finish init */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* otherwise pick the stream that is furthest behind */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3932 
/* Enable (on != 0) or disable terminal echo on stdin.
 * Best effort: silently does nothing when termios is unavailable or stdin
 * is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3944 
/* Poll the keyboard (at most every 100ms) and act on interactive commands:
 * q quit, +/- verbosity, s QP histogram, h dump cycling, c/C filter commands,
 * d/D debug level, ? help. Returns AVERROR_EXIT on quit/signal, else 0. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        /* cycle: pkt dump off -> pkt dump -> pkt+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): one line is missing from this extract here */
    }
    if (key == 'c' || key == 'C'){
        /* send ('c') or queue ('C') a command to filters:
         * "<target>|all <time>|-1 <command>[ <argument>]" */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command; 'c' stops at first filter that
                         * supports it, 'C' sends to all matching filters */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* timed command: queued for execution at 'time' */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log call line is missing from this extract */
            "Parse error, at least 3 arguments were expected, "
            "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* cycle to the next debug mode, skipping unsupported ones */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                                             |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                  )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read an explicit debug value from the terminal */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
4061 
4062 #if HAVE_THREADS
/* Per-input-file reader thread: reads packets from the demuxer and pushes
 * them onto the file's thread message queue for the main thread to consume.
 * Exits (after signaling the error to the receiver) on read error or when
 * the queue is shut down. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* start non-blocking if requested; may be downgraded to blocking below */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* propagate EOF/error to the consumer side of the queue */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: warn once (the av_log line is
             * missing from this extract) and fall back to a blocking send */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            "Thread message queue blocking; consider raising the "
            "thread_queue_size option (current value: %d)\n",
            f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4103 
/* Stop and join the reader thread of input file 'i' and free its queue.
 * No-op if the file has no thread queue. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* NOTE(review): one line is missing from this extract here — presumably
     * the call that signals the queue so the sender stops. */
    /* drain any packets still queued so their buffers are released */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4119 
4120 static void free_input_threads(void)
4121 {
4122  int i;
4123 
4124  for (i = 0; i < nb_input_files; i++)
4125  free_input_thread(i);
4126 }
4127 
4128 static int init_input_thread(int i)
4129 {
4130  int ret;
4131  InputFile *f = input_files[i];
4132 
4133  if (f->thread_queue_size < 0)
4134  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4135  if (!f->thread_queue_size)
4136  return 0;
4137 
4138  if (f->ctx->pb ? !f->ctx->pb->seekable :
4139  strcmp(f->ctx->iformat->name, "lavfi"))
4140  f->non_blocking = 1;
4141  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4142  f->thread_queue_size, sizeof(AVPacket));
4143  if (ret < 0)
4144  return ret;
4145 
4146  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4147  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4148  av_thread_message_queue_free(&f->in_thread_queue);
4149  return AVERROR(ret);
4150  }
4151 
4152  return 0;
4153 }
4154 
4155 static int init_input_threads(void)
4156 {
4157  int i, ret;
4158 
4159  for (i = 0; i < nb_input_files; i++) {
4160  ret = init_input_thread(i);
4161  if (ret < 0)
4162  return ret;
4163  }
4164  return 0;
4165 }
4166 
/* Fetch the next packet of 'f' from its reader-thread queue.
 * Non-blocking when the file was marked non_blocking at thread init. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        /* NOTE(review): the flag argument line
                                         * is missing from this extract */
}
#endif
4174 
{
    /* Read the next packet from input file 'f' into 'pkt', honoring -re rate
     * emulation and the threaded reader when enabled.
     * NOTE(review): the signature line is missing from this extract; the body
     * uses parameters 'f' (InputFile*) and 'pkt' (AVPacket*). */
    if (f->rate_emu) {
        /* -re: do not deliver a packet before its dts (rescaled to
         * microseconds) has elapsed on the wall clock */
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4194 
4195 static int got_eagain(void)
4196 {
4197  int i;
4198  for (i = 0; i < nb_output_streams; i++)
4199  if (output_streams[i]->unavailable)
4200  return 1;
4201  return 0;
4202 }
4203 
4204 static void reset_eagain(void)
4205 {
4206  int i;
4207  for (i = 0; i < nb_input_files; i++)
4208  input_files[i]->eagain = 0;
4209  for (i = 0; i < nb_output_streams; i++)
4210  output_streams[i]->unavailable = 0;
4211 }
4212 
4213 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4214 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4215  AVRational time_base)
4216 {
4217  int ret;
4218 
4219  if (!*duration) {
4220  *duration = tmp;
4221  return tmp_time_base;
4222  }
4223 
4224  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4225  if (ret < 0) {
4226  *duration = tmp;
4227  return tmp_time_base;
4228  }
4229 
4230  return time_base;
4231 }
4232 
4234 {
4235  InputStream *ist;
4236  AVCodecContext *avctx;
4237  int i, ret, has_audio = 0;
4238  int64_t duration = 0;
4239 
4240  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4241  if (ret < 0)
4242  return ret;
4243 
4244  for (i = 0; i < ifile->nb_streams; i++) {
4245  ist = input_streams[ifile->ist_index + i];
4246  avctx = ist->dec_ctx;
4247 
4248  /* duration is the length of the last frame in a stream
4249  * when audio stream is present we don't care about
4250  * last video frame length because it's not defined exactly */
4251  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4252  has_audio = 1;
4253  }
4254 
4255  for (i = 0; i < ifile->nb_streams; i++) {
4256  ist = input_streams[ifile->ist_index + i];
4257  avctx = ist->dec_ctx;
4258 
4259  if (has_audio) {
4260  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4261  AVRational sample_rate = {1, avctx->sample_rate};
4262 
4263  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4264  } else {
4265  continue;
4266  }
4267  } else {
4268  if (ist->framerate.num) {
4269  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4270  } else if (ist->st->avg_frame_rate.num) {
4271  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4272  } else {
4273  duration = 1;
4274  }
4275  }
4276  if (!ifile->duration)
4277  ifile->time_base = ist->st->time_base;
4278  /* the total duration of the stream, max_pts - min_pts is
4279  * the duration of the stream without the last frame */
4280  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4281  duration += ist->max_pts - ist->min_pts;
4282  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4283  ifile->time_base);
4284  }
4285 
4286  if (ifile->loop > 0)
4287  ifile->loop--;
4288 
4289  return ret;
4290 }
4291 
4292 /*
4293  * Return
4294  * - 0 -- one packet was read and processed
4295  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4296  * this function should be called again
4297  * - AVERROR_EOF -- this function should not be called again
4298  */
4299 static int process_input(int file_index)
4300 {
4301  InputFile *ifile = input_files[file_index];
4303  InputStream *ist;
4304  AVPacket pkt;
4305  int ret, thread_ret, i, j;
4306  int64_t duration;
4307  int64_t pkt_dts;
4308  int disable_discontinuity_correction = copy_ts;
4309 
4310  is = ifile->ctx;
4311  ret = get_input_packet(ifile, &pkt);
4312 
4313  if (ret == AVERROR(EAGAIN)) {
4314  ifile->eagain = 1;
4315  return ret;
4316  }
4317  if (ret < 0 && ifile->loop) {
4318  AVCodecContext *avctx;
4319  for (i = 0; i < ifile->nb_streams; i++) {
4320  ist = input_streams[ifile->ist_index + i];
4321  avctx = ist->dec_ctx;
4322  if (ist->decoding_needed) {
4323  ret = process_input_packet(ist, NULL, 1);
4324  if (ret>0)
4325  return 0;
4326  avcodec_flush_buffers(avctx);
4327  }
4328  }
4329 #if HAVE_THREADS
4330  free_input_thread(file_index);
4331 #endif
4332  ret = seek_to_start(ifile, is);
4333 #if HAVE_THREADS
4334  thread_ret = init_input_thread(file_index);
4335  if (thread_ret < 0)
4336  return thread_ret;
4337 #endif
4338  if (ret < 0)
4339  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4340  else
4341  ret = get_input_packet(ifile, &pkt);
4342  if (ret == AVERROR(EAGAIN)) {
4343  ifile->eagain = 1;
4344  return ret;
4345  }
4346  }
4347  if (ret < 0) {
4348  if (ret != AVERROR_EOF) {
4349  print_error(is->url, ret);
4350  if (exit_on_error)
4351  exit_program(1);
4352  }
4353 
4354  for (i = 0; i < ifile->nb_streams; i++) {
4355  ist = input_streams[ifile->ist_index + i];
4356  if (ist->decoding_needed) {
4357  ret = process_input_packet(ist, NULL, 0);
4358  if (ret>0)
4359  return 0;
4360  }
4361 
4362  /* mark all outputs that don't go through lavfi as finished */
4363  for (j = 0; j < nb_output_streams; j++) {
4364  OutputStream *ost = output_streams[j];
4365 
4366  if (ost->source_index == ifile->ist_index + i &&
4367  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4368  finish_output_stream(ost);
4369  }
4370  }
4371 
4372  ifile->eof_reached = 1;
4373  return AVERROR(EAGAIN);
4374  }
4375 
4376  reset_eagain();
4377 
4378  if (do_pkt_dump) {
4380  is->streams[pkt.stream_index]);
4381  }
4382  /* the following test is needed in case new streams appear
4383  dynamically in stream : we ignore them */
4384  if (pkt.stream_index >= ifile->nb_streams) {
4385  report_new_stream(file_index, &pkt);
4386  goto discard_packet;
4387  }
4388 
4389  ist = input_streams[ifile->ist_index + pkt.stream_index];
4390 
4391  ist->data_size += pkt.size;
4392  ist->nb_packets++;
4393 
4394  if (ist->discard)
4395  goto discard_packet;
4396 
4397  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4399  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4400  if (exit_on_error)
4401  exit_program(1);
4402  }
4403 
4404  if (debug_ts) {
4405  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4406  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4410  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4411  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4412  av_ts2str(input_files[ist->file_index]->ts_offset),
4413  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4414  }
4415 
4416  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4417  int64_t stime, stime2;
4418  // Correcting starttime based on the enabled streams
4419  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4420  // so we instead do it here as part of discontinuity handling
4421  if ( ist->next_dts == AV_NOPTS_VALUE
4422  && ifile->ts_offset == -is->start_time
4423  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4424  int64_t new_start_time = INT64_MAX;
4425  for (i=0; i<is->nb_streams; i++) {
4426  AVStream *st = is->streams[i];
4427  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4428  continue;
4429  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4430  }
4431  if (new_start_time > is->start_time) {
4432  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4433  ifile->ts_offset = -new_start_time;
4434  }
4435  }
4436 
4437  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4438  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4439  ist->wrap_correction_done = 1;
4440 
4441  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4442  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4443  ist->wrap_correction_done = 0;
4444  }
4445  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4446  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4447  ist->wrap_correction_done = 0;
4448  }
4449  }
4450 
4451  /* add the stream-global side data to the first packet */
4452  if (ist->nb_packets == 1) {
4453  for (i = 0; i < ist->st->nb_side_data; i++) {
4454  AVPacketSideData *src_sd = &ist->st->side_data[i];
4455  uint8_t *dst_data;
4456 
4457  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4458  continue;
4459 
4460  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4461  continue;
4462 
4463  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4464  if (!dst_data)
4465  exit_program(1);
4466 
4467  memcpy(dst_data, src_sd->data, src_sd->size);
4468  }
4469  }
4470 
4471  if (pkt.dts != AV_NOPTS_VALUE)
4472  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4473  if (pkt.pts != AV_NOPTS_VALUE)
4474  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4475 
4476  if (pkt.pts != AV_NOPTS_VALUE)
4477  pkt.pts *= ist->ts_scale;
4478  if (pkt.dts != AV_NOPTS_VALUE)
4479  pkt.dts *= ist->ts_scale;
4480 
4482  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4484  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4485  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4486  int64_t delta = pkt_dts - ifile->last_ts;
4487  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4488  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4489  ifile->ts_offset -= delta;
4491  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4492  delta, ifile->ts_offset);
4493  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4494  if (pkt.pts != AV_NOPTS_VALUE)
4495  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4496  }
4497  }
4498 
4499  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4500  if (pkt.pts != AV_NOPTS_VALUE) {
4501  pkt.pts += duration;
4502  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4503  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4504  }
4505 
4506  if (pkt.dts != AV_NOPTS_VALUE)
4507  pkt.dts += duration;
4508 
4510 
4511  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4512  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4513  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4514  ist->st->time_base, AV_TIME_BASE_Q,
4516  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4517  disable_discontinuity_correction = 0;
4518  }
4519 
4520  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4522  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4523  !disable_discontinuity_correction) {
4524  int64_t delta = pkt_dts - ist->next_dts;
4525  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4526  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4527  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4528  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4529  ifile->ts_offset -= delta;
4531  "timestamp discontinuity for stream #%d:%d "
4532  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4533  ist->file_index, ist->st->index, ist->st->id,
4535  delta, ifile->ts_offset);
4536  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4537  if (pkt.pts != AV_NOPTS_VALUE)
4538  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4539  }
4540  } else {
4541  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4542  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4543  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4544  pkt.dts = AV_NOPTS_VALUE;
4545  }
4546  if (pkt.pts != AV_NOPTS_VALUE){
4547  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4548  delta = pkt_pts - ist->next_dts;
4549  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4550  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4551  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4552  pkt.pts = AV_NOPTS_VALUE;
4553  }
4554  }
4555  }
4556  }
4557 
4558  if (pkt.dts != AV_NOPTS_VALUE)
4559  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4560 
4561  if (debug_ts) {
4562  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4564  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4565  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4566  av_ts2str(input_files[ist->file_index]->ts_offset),
4567  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4568  }
4569 
4570  sub2video_heartbeat(ist, pkt.pts);
4571 
4572  process_input_packet(ist, &pkt, 0);
4573