FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* File into which per-frame video statistics are written when -vstats is used. */
112 static FILE *vstats_file;
113 
/* Names of the variables usable in -force_key_frames expressions (NULL-terminated). */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 
128 static int run_as_daemon = 0;
129 static int nb_frames_dup = 0;
/* Next duplicated-frame count at which a warning is printed (multiplied by 10 each time). */
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
133 
134 static int want_sdp = 1;
135 
/* Timestamp of the previous update_benchmark() call (-benchmark_all). */
136 static int current_time;
138 
140 
145 
150 
153 
154 #if HAVE_TERMIOS_H
155 
156 /* init terminal so that we can grab keys */
/* Terminal attributes saved by term_init(); restored on exit when restore_tty is set. */
157 static struct termios oldtty;
158 static int restore_tty;
159 #endif
160 
161 #if HAVE_THREADS
162 static void free_input_threads(void);
163 #endif
164 
165 /* sub2video hack:
166  Convert subtitles to video with alpha to insert them in filter graphs.
167  This is a temporary solution until libavfilter gets real subtitles support.
168  */
169 
/* Reset the sub2video canvas frame to a blank (zero-filled) buffer.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the scrape dropped the signature line (170) — presumably
 * sub2video_get_blank_frame(InputStream *ist) — and line 178, which
 * presumably sets the frame's pixel format; confirm against upstream. */
171 {
172  int ret;
173  AVFrame *frame = ist->sub2video.frame;
174 
175  av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to the precomputed sub2video size. */
176  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
179  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180  return ret;
/* Zero-fill the first plane (presumably fully transparent — depends on the
 * pixel format set on the dropped line 178; confirm upstream). */
181  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
182  return 0;
183 }
184 
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186  AVSubtitleRect *r)
187 {
188  uint32_t *pal, *dst2;
189  uint8_t *src, *src2;
190  int x, y;
191 
192  if (r->type != SUBTITLE_BITMAP) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194  return;
195  }
196  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198  r->x, r->y, r->w, r->h, w, h
199  );
200  return;
201  }
202 
203  dst += r->y * dst_linesize + r->x * 4;
204  src = r->data[0];
205  pal = (uint32_t *)r->data[1];
206  for (y = 0; y < r->h; y++) {
207  dst2 = (uint32_t *)dst;
208  src2 = src;
209  for (x = 0; x < r->w; x++)
210  *(dst2++) = pal[*(src2++)];
211  dst += dst_linesize;
212  src += r->linesize[0];
213  }
214 }
215 
/* Send the current sub2video canvas frame, stamped with pts, to every
 * filtergraph input fed by this input stream, and remember pts as the
 * last pushed subtitle timestamp.
 * NOTE(review): the scrape dropped lines 226-227 — the tail of the
 * av_buffersrc_add_frame_flags() call (its flags argument); confirm the
 * exact flags against upstream. */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 {
218  AVFrame *frame = ist->sub2video.frame;
219  int i;
220  int ret;
221 
/* The canvas must already have a buffer (sub2video_get_blank_frame()). */
222  av_assert1(frame->data[0]);
223  ist->sub2video.last_pts = frame->pts = pts;
224  for (i = 0; i < ist->nb_filters; i++) {
225  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
/* EOF from a buffer source is tolerated; only other errors are reported. */
228  if (ret != AVERROR_EOF && ret < 0)
229  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
230  av_err2str(ret));
231  }
232 }
233 
/* Re-render the sub2video canvas from a subtitle and push it into the
 * filtergraphs.  With sub == NULL the canvas is blanked (no rectangles)
 * and marked valid until further notice (end_pts = INT64_MAX).
 * NOTE(review): the scrape dropped line 234 (the signature — presumably
 * sub2video_update(InputStream *ist, AVSubtitle *sub)) and line 256 (the
 * head of the av_log() call); confirm against upstream. */
235 {
236  AVFrame *frame = ist->sub2video.frame;
237  int8_t *dst;
238  int dst_linesize;
239  int num_rects, i;
240  int64_t pts, end_pts;
241 
242  if (!frame)
243  return;
244  if (sub) {
/* Convert the subtitle display window (ms offsets on an AV_TIME_BASE pts)
 * into the input stream's time base. */
245  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
248  AV_TIME_BASE_Q, ist->st->time_base);
249  num_rects = sub->num_rects;
250  } else {
251  pts = ist->sub2video.end_pts;
252  end_pts = INT64_MAX;
253  num_rects = 0;
254  }
255  if (sub2video_get_blank_frame(ist) < 0) {
257  "Impossible to get a blank canvas.\n");
258  return;
259  }
260  dst = frame->data [0];
261  dst_linesize = frame->linesize[0];
262  for (i = 0; i < num_rects; i++)
263  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
264  sub2video_push_ref(ist, pts);
265  ist->sub2video.end_pts = end_pts;
266 }
267 
268 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
269 {
270  InputFile *infile = input_files[ist->file_index];
271  int i, j, nb_reqs;
272  int64_t pts2;
273 
274  /* When a frame is read from a file, examine all sub2video streams in
275  the same file and send the sub2video frame again. Otherwise, decoded
276  video frames could be accumulating in the filter graph while a filter
277  (possibly overlay) is desperately waiting for a subtitle frame. */
278  for (i = 0; i < infile->nb_streams; i++) {
279  InputStream *ist2 = input_streams[infile->ist_index + i];
280  if (!ist2->sub2video.frame)
281  continue;
282  /* subtitles seem to be usually muxed ahead of other streams;
283  if not, subtracting a larger time here is necessary */
284  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
285  /* do not send the heartbeat frame if the subtitle is already ahead */
286  if (pts2 <= ist2->sub2video.last_pts)
287  continue;
288  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
289  sub2video_update(ist2, NULL);
290  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
291  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
292  if (nb_reqs)
293  sub2video_push_ref(ist2, pts2);
294  }
295 }
296 
297 static void sub2video_flush(InputStream *ist)
298 {
299  int i;
300  int ret;
301 
302  if (ist->sub2video.end_pts < INT64_MAX)
303  sub2video_update(ist, NULL);
304  for (i = 0; i < ist->nb_filters; i++) {
305  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
306  if (ret != AVERROR_EOF && ret < 0)
307  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
308  }
309 }
310 
311 /* end of sub2video hack */
312 
/* Restore the terminal attributes saved by term_init().  Uses only
 * tcsetattr() (async-signal-safe per POSIX), so it is callable from a
 * signal handler.  No-op if the tty state was never modified. */
313 static void term_exit_sigsafe(void)
314 {
315 #if HAVE_TERMIOS_H
316  if(restore_tty)
317  tcsetattr (0, TCSANOW, &oldtty);
318 #endif
319 }
320 
/* Public terminal cleanup: silence the logger, then restore the tty.
 * NOTE(review): the scrape dropped line 324 — presumably the
 * term_exit_sigsafe() call; confirm against upstream. */
321 void term_exit(void)
322 {
323  av_log(NULL, AV_LOG_QUIET, "%s", "");
325 }
326 
327 static volatile int received_sigterm = 0;
328 static volatile int received_nb_signals = 0;
330 static volatile int ffmpeg_exited = 0;
331 static int main_return_code = 0;
332 
/* Handler for termination signals: records the signal number and, after
 * repeated signals, hard-exits the process.
 * NOTE(review): the scrape dropped line 334 (the rest of the signature —
 * presumably sigterm_handler(int sig)) and lines 338-339 (presumably the
 * received_nb_signals increment and a term_exit_sigsafe() call); confirm
 * against upstream. */
333 static void
335 {
336  int ret;
337  received_sigterm = sig;
/* More than 3 signals: assume shutdown is stuck and exit immediately. */
340  if(received_nb_signals > 3) {
/* write() is async-signal-safe; stdio is not. */
341  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
342  strlen("Received > 3 system signals, hard exiting\n"));
343  if (ret < 0) { /* Do nothing */ };
344  exit(123);
345  }
346 }
347 
348 #if HAVE_SETCONSOLECTRLHANDLER
349 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
350 {
351  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
352 
353  switch (fdwCtrlType)
354  {
355  case CTRL_C_EVENT:
356  case CTRL_BREAK_EVENT:
357  sigterm_handler(SIGINT);
358  return TRUE;
359 
360  case CTRL_CLOSE_EVENT:
361  case CTRL_LOGOFF_EVENT:
362  case CTRL_SHUTDOWN_EVENT:
363  sigterm_handler(SIGTERM);
364  /* Basically, with these 3 events, when we return from this method the
365  process is hard terminated, so stall as long as we need to
366  to try and let the main thread(s) clean up and gracefully terminate
367  (we have at most 5 seconds, but should be done far before that). */
368  while (!ffmpeg_exited) {
369  Sleep(0);
370  }
371  return TRUE;
372 
373  default:
374  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
375  return FALSE;
376  }
377 }
378 #endif
379 
/* Install signal/console handlers and put the controlling terminal into a
 * raw-ish mode so single key presses can be read without Enter (read_key()).
 * NOTE(review): the scrape dropped line 383 — presumably an opening
 * `if (...) {` guard (e.g. on run_as_daemon) that the stray `}` at line
 * 401 closes; confirm against upstream. */
380 void term_init(void)
381 {
382 #if HAVE_TERMIOS_H
384  struct termios tty;
385  if (tcgetattr (0, &tty) == 0) {
/* Save current attributes so term_exit_sigsafe() can restore them. */
386  oldtty = tty;
387  restore_tty = 1;
388 
/* Disable canonical mode and echo; deliver bytes one at a time, no timeout. */
389  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
390  |INLCR|IGNCR|ICRNL|IXON);
391  tty.c_oflag |= OPOST;
392  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
393  tty.c_cflag &= ~(CSIZE|PARENB);
394  tty.c_cflag |= CS8;
395  tty.c_cc[VMIN] = 1;
396  tty.c_cc[VTIME] = 0;
397 
398  tcsetattr (0, TCSANOW, &tty);
399  }
400  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
401  }
402 #endif
403 
404  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
405  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
406 #ifdef SIGXCPU
407  signal(SIGXCPU, sigterm_handler);
408 #endif
409 #if HAVE_SETCONSOLECTRLHANDLER
410  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
411 #endif
412 }
413 
414 /* read a key without blocking */
/* Poll stdin for a single key press without blocking.
 * Returns the byte read, -1 when nothing is available (or no console
 * input mechanism exists), or the non-positive read() result on EOF/error
 * (termios path). */
415 static int read_key(void)
416 {
417  unsigned char ch;
418 #if HAVE_TERMIOS_H
419  int n = 1;
420  struct timeval tv;
421  fd_set rfds;
422 
/* Zero-timeout select(): test stdin readability without blocking. */
423  FD_ZERO(&rfds);
424  FD_SET(0, &rfds);
425  tv.tv_sec = 0;
426  tv.tv_usec = 0;
427  n = select(1, &rfds, NULL, NULL, &tv);
428  if (n > 0) {
429  n = read(0, &ch, 1);
430  if (n == 1)
431  return ch;
432 
433  return n;
434  }
435 #elif HAVE_KBHIT
436 # if HAVE_PEEKNAMEDPIPE
437  static int is_pipe;
438  static HANDLE input_handle;
439  DWORD dw, nchars;
/* Lazily determine whether stdin is a console or a pipe (GUI invocation). */
440  if(!input_handle){
441  input_handle = GetStdHandle(STD_INPUT_HANDLE);
442  is_pipe = !GetConsoleMode(input_handle, &dw);
443  }
444 
445  if (is_pipe) {
446  /* When running under a GUI, you will end here. */
447  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
448  // input pipe may have been closed by the program that ran ffmpeg
449  return -1;
450  }
451  //Read it
452  if(nchars != 0) {
453  read(0, &ch, 1);
454  return ch;
455  }else{
456  return -1;
457  }
458  }
459 # endif
460  if(kbhit())
461  return(getch());
462 #endif
463  return -1;
464 }
465 
/* AVIOInterruptCallback: returning non-zero aborts blocking libav* I/O.
 * NOTE(review): the scrape dropped line 468 (the return expression —
 * presumably based on received_nb_signals / transcode_init_done);
 * confirm against upstream. */
466 static int decode_interrupt_cb(void *ctx)
467 {
469 }
470 
472 
/* Global teardown, registered as the exit_program() cleanup hook: drains
 * and frees filtergraphs, output files/streams, input files/streams and
 * auxiliary state, then restores the terminal.  ret is the process exit
 * status being propagated (only used here for the final log message).
 * NOTE(review): the scrape dropped numerous cross-referenced lines
 * (e.g. 484, 496, 500, 502, 521, 526, 532, 547, 552, 555, 559, 564-565,
 * 573, 588-592, 597, 604, 608, 617, 629) — mostly additional free/close
 * calls and declarations; confirm against upstream before editing. */
473 static void ffmpeg_cleanup(int ret)
474 {
475  int i, j;
476 
477  if (do_benchmark) {
478  int maxrss = getmaxrss() / 1024;
479  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
480  }
481 
/* Drain and free every filtergraph, including queued frames/subtitles. */
482  for (i = 0; i < nb_filtergraphs; i++) {
483  FilterGraph *fg = filtergraphs[i];
485  for (j = 0; j < fg->nb_inputs; j++) {
486  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
487  AVFrame *frame;
488  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
489  sizeof(frame), NULL);
490  av_frame_free(&frame);
491  }
492  av_fifo_freep(&fg->inputs[j]->frame_queue);
493  if (fg->inputs[j]->ist->sub2video.sub_queue) {
494  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
495  AVSubtitle sub;
497  &sub, sizeof(sub), NULL);
498  avsubtitle_free(&sub);
499  }
501  }
503  av_freep(&fg->inputs[j]->name);
504  av_freep(&fg->inputs[j]);
505  }
506  av_freep(&fg->inputs);
507  for (j = 0; j < fg->nb_outputs; j++) {
508  av_freep(&fg->outputs[j]->name);
509  av_freep(&fg->outputs[j]->formats);
510  av_freep(&fg->outputs[j]->channel_layouts);
511  av_freep(&fg->outputs[j]->sample_rates);
512  av_freep(&fg->outputs[j]);
513  }
514  av_freep(&fg->outputs);
515  av_freep(&fg->graph_desc);
516 
517  av_freep(&filtergraphs[i]);
518  }
519  av_freep(&filtergraphs);
520 
522 
523  /* close files */
524  for (i = 0; i < nb_output_files; i++) {
525  OutputFile *of = output_files[i];
527  if (!of)
528  continue;
529  s = of->ctx;
530  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
531  avio_closep(&s->pb);
533  av_dict_free(&of->opts);
534 
535  av_freep(&output_files[i]);
536  }
/* Free per-output-stream state (bsf chain, encoder opts, muxing queue...). */
537  for (i = 0; i < nb_output_streams; i++) {
538  OutputStream *ost = output_streams[i];
539 
540  if (!ost)
541  continue;
542 
543  for (j = 0; j < ost->nb_bitstream_filters; j++)
544  av_bsf_free(&ost->bsf_ctx[j]);
545  av_freep(&ost->bsf_ctx);
546 
548  av_frame_free(&ost->last_frame);
549  av_dict_free(&ost->encoder_opts);
550 
551  av_parser_close(ost->parser);
553 
554  av_freep(&ost->forced_keyframes);
556  av_freep(&ost->avfilter);
557  av_freep(&ost->logfile_prefix);
558 
560  ost->audio_channels_mapped = 0;
561 
562  av_dict_free(&ost->sws_dict);
563 
566 
/* Packets buffered before the muxer header was written must be unreffed. */
567  if (ost->muxing_queue) {
568  while (av_fifo_size(ost->muxing_queue)) {
569  AVPacket pkt;
570  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
571  av_packet_unref(&pkt);
572  }
574  }
575 
576  av_freep(&output_streams[i]);
577  }
578 #if HAVE_THREADS
579  free_input_threads();
580 #endif
581  for (i = 0; i < nb_input_files; i++) {
582  avformat_close_input(&input_files[i]->ctx);
583  av_freep(&input_files[i]);
584  }
585  for (i = 0; i < nb_input_streams; i++) {
586  InputStream *ist = input_streams[i];
587 
590  av_dict_free(&ist->decoder_opts);
593  av_freep(&ist->filters);
594  av_freep(&ist->hwaccel_device);
595  av_freep(&ist->dts_buffer);
596 
598 
599  av_freep(&input_streams[i]);
600  }
601 
602  if (vstats_file) {
603  if (fclose(vstats_file))
605  "Error closing vstats file, loss of information possible: %s\n",
606  av_err2str(AVERROR(errno)));
607  }
609 
610  av_freep(&input_streams);
611  av_freep(&input_files);
612  av_freep(&output_streams);
613  av_freep(&output_files);
614 
615  uninit_opts();
616 
618 
619  if (received_sigterm) {
620  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
621  (int) received_sigterm);
622  } else if (ret && atomic_load(&transcode_init_done)) {
623  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
624  }
625  term_exit();
/* Lets the Windows CtrlHandler spin-wait know that cleanup is complete. */
626  ffmpeg_exited = 1;
627 }
628 
/* Remove from the first dictionary every key present in the second.
 * NOTE(review): the scrape dropped line 629 (the signature — presumably
 * remove_avoptions(AVDictionary **a, AVDictionary *b)) and line 634 (the
 * loop body — presumably an av_dict_set(..., NULL, ...) deleting t->key);
 * confirm against upstream. */
630 {
631  AVDictionaryEntry *t = NULL;
632 
/* Empty key + AV_DICT_IGNORE_SUFFIX iterates every entry of b. */
633  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
635  }
636 }
637 
/* Abort the program if the dictionary still contains any entry, i.e. an
 * option that was not consumed/recognized by the component it was given to.
 * NOTE(review): the scrape dropped line 638 (the signature — presumably
 * assert_avoptions(AVDictionary *m)) and line 640 (presumably the
 * AVDictionaryEntry *t; declaration); confirm against upstream. */
639 {
641  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
642  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
643  exit_program(1);
644  }
645 }
646 
/* Terminate after an experimental codec was requested without the required
 * -strict setting.  The c/encoder parameters identify the offending codec
 * and direction; they are unused here (the caller logs the details). */
647 static void abort_codec_experimental(AVCodec *c, int encoder)
648 {
649  exit_program(1);
650 }
651 
652 static void update_benchmark(const char *fmt, ...)
653 {
654  if (do_benchmark_all) {
655  int64_t t = getutime();
656  va_list va;
657  char buf[1024];
658 
659  if (fmt) {
660  va_start(va, fmt);
661  vsnprintf(buf, sizeof(buf), fmt, va);
662  va_end(va);
663  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
664  }
665  current_time = t;
666  }
667 }
668 
669 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
670 {
671  int i;
672  for (i = 0; i < nb_output_streams; i++) {
673  OutputStream *ost2 = output_streams[i];
674  ost2->finished |= ost == ost2 ? this_stream : others;
675  }
676 }
677 
/* Hand one packet to the muxer of output file `of` for stream `ost`:
 * enforces -frames limits, buffers packets while the muxer header is not
 * yet written, fixes up timestamps (rescale to stream time base, repair
 * dts>pts and non-monotonous dts), updates stats, then interleave-writes.
 * unqueue is non-zero when the packet is being replayed from the muxing
 * queue (it was already counted when first queued).
 * NOTE(review): the scrape dropped several cross-referenced lines
 * (707, 724-725, 730, 764, 797, 808) — an av_log head, the condition
 * guarding the pts/dts reset at 726, the side-data lookup declaration,
 * a condition head, av_get_media_type_string, and the error-path call
 * after a failed write; confirm against upstream before editing. */
678 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
679 {
680  AVFormatContext *s = of->ctx;
681  AVStream *st = ost->st;
682  int ret;
683 
684  /*
685  * Audio encoders may split the packets -- #frames in != #packets out.
686  * But there is no reordering, so we can limit the number of output packets
687  * by simply dropping them here.
688  * Counting encoded video frames needs to be done separately because of
689  * reordering, see do_video_out().
690  * Do not count the packet when unqueued because it has been counted when queued.
691  */
692  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
693  if (ost->frame_number >= ost->max_frames) {
694  av_packet_unref(pkt);
695  return;
696  }
697  ost->frame_number++;
698  }
699 
700  if (!of->header_written) {
701  AVPacket tmp_pkt = {0};
702  /* the muxer is not initialized yet, buffer the packet */
703  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped at -max_muxing_queue_size. */
704  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
705  ost->max_muxing_queue_size);
706  if (new_size <= av_fifo_size(ost->muxing_queue)) {
708  "Too many packets buffered for output stream %d:%d.\n",
709  ost->file_index, ost->st->index);
710  exit_program(1);
711  }
712  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
713  if (ret < 0)
714  exit_program(1);
715  }
716  ret = av_packet_ref(&tmp_pkt, pkt);
717  if (ret < 0)
718  exit_program(1);
719  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
720  av_packet_unref(pkt);
721  return;
722  }
723 
726  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
727 
/* Video: harvest encoder quality/error side data and, for CFR output,
 * force the packet duration to one frame interval. */
728  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
729  int i;
731  NULL);
732  ost->quality = sd ? AV_RL32(sd) : -1;
733  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
734 
735  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
736  if (sd && i < sd[5])
737  ost->error[i] = AV_RL64(sd + 8 + 8*i);
738  else
739  ost->error[i] = -1;
740  }
741 
742  if (ost->frame_rate.num && ost->is_cfr) {
743  if (pkt->duration > 0)
744  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
745  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
746  ost->mux_timebase);
747  }
748  }
749 
750  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
751 
752  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid: replace both with a median-style guess that keeps
 * monotonicity relative to the last muxed dts. */
753  if (pkt->dts != AV_NOPTS_VALUE &&
754  pkt->pts != AV_NOPTS_VALUE &&
755  pkt->dts > pkt->pts) {
756  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
757  pkt->dts, pkt->pts,
758  ost->file_index, ost->st->index);
759  pkt->pts =
/* sum - min - max == the middle value of the three candidates. */
760  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
761  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
762  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
763  }
765  pkt->dts != AV_NOPTS_VALUE &&
766  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
767  ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Non-strict muxers allow equal dts; strict ones need strictly increasing. */
768  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
769  if (pkt->dts < max) {
770  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
771  av_log(s, loglevel, "Non-monotonous DTS in output stream "
772  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
773  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
774  if (exit_on_error) {
775  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
776  exit_program(1);
777  }
778  av_log(s, loglevel, "changing to %"PRId64". This may result "
779  "in incorrect timestamps in the output file.\n",
780  max);
781  if (pkt->pts >= pkt->dts)
782  pkt->pts = FFMAX(pkt->pts, max);
783  pkt->dts = max;
784  }
785  }
786  }
787  ost->last_mux_dts = pkt->dts;
788 
789  ost->data_size += pkt->size;
790  ost->packets_written++;
791 
792  pkt->stream_index = ost->index;
793 
794  if (debug_ts) {
795  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
796  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
798  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
799  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
800  pkt->size
801  );
802  }
803 
804  ret = av_interleaved_write_frame(s, pkt);
805  if (ret < 0) {
806  print_error("av_interleaved_write_frame()", ret);
807  main_return_code = 1;
809  }
810  av_packet_unref(pkt);
811 }
812 
/* Mark this output stream's encoder finished; with -shortest, clamp the
 * file's recording time to the stream's current end position.
 * NOTE(review): the scrape dropped line 813 (the signature — presumably
 * close_output_stream(OutputStream *ost)); confirm against upstream. */
814 {
815  OutputFile *of = output_files[ost->file_index];
816 
817  ost->finished |= ENCODER_FINISHED;
818  if (of->shortest) {
/* Convert the stream's progress into AV_TIME_BASE for the file limit. */
819  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
820  of->recording_time = FFMIN(of->recording_time, end);
821  }
822 }
823 
824 /*
825  * Send a single packet to the output, applying any bitstream filters
826  * associated with the output stream. This may result in any number
827  * of packets actually being written, depending on what bitstream
828  * filters are applied. The supplied packet is consumed and will be
829  * blank (as if newly-allocated) when this function returns.
830  *
831  * If eof is set, instead indicate EOF to all bitstream filters and
832  * therefore flush any delayed packets to the output. A blank packet
833  * must be supplied in this case.
834  */
/* NOTE(review): the scrape dropped line 835 (the signature — presumably
 * output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof));
 * confirm against upstream. */
836  OutputStream *ost, int eof)
837 {
838  int ret = 0;
839 
840  /* apply the output bitstream filters, if any */
841  if (ost->nb_bitstream_filters) {
/* idx is 1 + the index of the filter we are currently draining; walking it
 * down to 0 drains the whole chain before asking for more input. */
842  int idx;
843 
844  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
845  if (ret < 0)
846  goto finish;
847 
848  eof = 0;
849  idx = 1;
850  while (idx) {
851  /* get a packet from the previous filter up the chain */
852  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
853  if (ret == AVERROR(EAGAIN)) {
/* This filter needs more input: step back up the chain. */
854  ret = 0;
855  idx--;
856  continue;
857  } else if (ret == AVERROR_EOF) {
858  eof = 1;
859  } else if (ret < 0)
860  goto finish;
861 
862  /* send it to the next filter down the chain or to the muxer */
863  if (idx < ost->nb_bitstream_filters) {
864  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
865  if (ret < 0)
866  goto finish;
867  idx++;
868  eof = 0;
869  } else if (eof)
870  goto finish;
871  else
872  write_packet(of, pkt, ost, 0);
873  }
874  } else if (!eof)
875  write_packet(of, pkt, ost, 0);
876 
877 finish:
878  if (ret < 0 && ret != AVERROR_EOF) {
879  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
880  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
881  if(exit_on_error)
882  exit_program(1);
883  }
884 }
885 
/* Returns 1 while the output stream is still within the file's -t/-to
 * recording window, 0 (after closing the stream) once it is past it.
 * NOTE(review): the scrape dropped line 886 (the signature — presumably
 * check_recording_time(OutputStream *ost)) and line 891 (the first half
 * of the av_compare_ts() condition); confirm against upstream. */
887 {
888  OutputFile *of = output_files[ost->file_index];
889 
890  if (of->recording_time != INT64_MAX &&
892  AV_TIME_BASE_Q) >= 0) {
893  close_output_stream(ost);
894  return 0;
895  }
896  return 1;
897 }
898 
/* Encode one audio frame for ost and mux all packets the encoder returns.
 * Maintains ost->sync_opts (next expected pts in samples) and encoding
 * statistics.  Fatal (exit_program) on any encode error.
 * NOTE(review): the scrape dropped line 920; confirm against upstream
 * what, if anything, stood there. */
899 static void do_audio_out(OutputFile *of, OutputStream *ost,
900  AVFrame *frame)
901 {
902  AVCodecContext *enc = ost->enc_ctx;
903  AVPacket pkt;
904  int ret;
905 
906  av_init_packet(&pkt);
907  pkt.data = NULL;
908  pkt.size = 0;
909 
910  if (!check_recording_time(ost))
911  return;
912 
/* Without a usable pts (or with -async < 0) continue from our own counter. */
913  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
914  frame->pts = ost->sync_opts;
915  ost->sync_opts = frame->pts + frame->nb_samples;
916  ost->samples_encoded += frame->nb_samples;
917  ost->frames_encoded++;
918 
919  av_assert0(pkt.size || !pkt.data);
921  if (debug_ts) {
922  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
923  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
924  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
925  enc->time_base.num, enc->time_base.den);
926  }
927 
928  ret = avcodec_send_frame(enc, frame);
929  if (ret < 0)
930  goto error;
931 
/* Drain every packet the encoder has ready; EAGAIN means it wants input. */
932  while (1) {
933  ret = avcodec_receive_packet(enc, &pkt);
934  if (ret == AVERROR(EAGAIN))
935  break;
936  if (ret < 0)
937  goto error;
938 
939  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
940 
941  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
942 
943  if (debug_ts) {
944  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
945  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
946  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
947  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
948  }
949 
950  output_packet(of, &pkt, ost, 0);
951  }
952 
953  return;
954 error:
955  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
956  exit_program(1);
957 }
958 
/* Encode one subtitle for ost and mux the result.  DVB subtitles are
 * emitted twice: one packet to draw the rectangles and one (with
 * num_rects forced to 0) to clear them at end_display_time.
 * NOTE(review): the scrape dropped line 989 — the condition before the
 * nb = 2 / nb = 1 choice (presumably a check for the DVB subtitle codec);
 * confirm against upstream. */
959 static void do_subtitle_out(OutputFile *of,
960  OutputStream *ost,
961  AVSubtitle *sub)
962 {
963  int subtitle_out_max_size = 1024 * 1024;
964  int subtitle_out_size, nb, i;
965  AVCodecContext *enc;
966  AVPacket pkt;
967  int64_t pts;
968 
969  if (sub->pts == AV_NOPTS_VALUE) {
970  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
971  if (exit_on_error)
972  exit_program(1);
973  return;
974  }
975 
976  enc = ost->enc_ctx;
977 
/* Lazily allocate the shared encode buffer (file-scope, reused). */
978  if (!subtitle_out) {
979  subtitle_out = av_malloc(subtitle_out_max_size);
980  if (!subtitle_out) {
981  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
982  exit_program(1);
983  }
984  }
985 
986  /* Note: DVB subtitle need one packet to draw them and one other
987  packet to clear them */
988  /* XXX: signal it in the codec context ? */
990  nb = 2;
991  else
992  nb = 1;
993 
994  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
995  pts = sub->pts;
996  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
997  pts -= output_files[ost->file_index]->start_time;
998  for (i = 0; i < nb; i++) {
999  unsigned save_num_rects = sub->num_rects;
1000 
1001  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1002  if (!check_recording_time(ost))
1003  return;
1004 
1005  sub->pts = pts;
1006  // start_display_time is required to be 0
1007  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1008  sub->end_display_time -= sub->start_display_time;
1009  sub->start_display_time = 0;
/* Second DVB pass: encode with no rectangles to clear the screen. */
1010  if (i == 1)
1011  sub->num_rects = 0;
1012 
1013  ost->frames_encoded++;
1014 
1015  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1016  subtitle_out_max_size, sub);
/* Restore the caller's rectangle count after the clearing pass. */
1017  if (i == 1)
1018  sub->num_rects = save_num_rects;
1019  if (subtitle_out_size < 0) {
1020  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1021  exit_program(1);
1022  }
1023 
1024  av_init_packet(&pkt);
1025  pkt.data = subtitle_out;
1026  pkt.size = subtitle_out_size;
1027  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1028  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1030  /* XXX: the pts correction is handled here. Maybe handling
1031  it in the codec would be better */
1032  if (i == 0)
1033  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1034  else
1035  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1036  }
1037  pkt.dts = pkt.pts;
1038  output_packet(of, &pkt, ost, 0);
1039  }
1040 }
1041 
1042 static void do_video_out(OutputFile *of,
1043  OutputStream *ost,
1044  AVFrame *next_picture,
1045  double sync_ipts)
1046 {
1047  int ret, format_video_sync;
1048  AVPacket pkt;
1049  AVCodecContext *enc = ost->enc_ctx;
1050  AVCodecParameters *mux_par = ost->st->codecpar;
1051  AVRational frame_rate;
1052  int nb_frames, nb0_frames, i;
1053  double delta, delta0;
1054  double duration = 0;
1055  int frame_size = 0;
1056  InputStream *ist = NULL;
1058 
1059  if (ost->source_index >= 0)
1060  ist = input_streams[ost->source_index];
1061 
1062  frame_rate = av_buffersink_get_frame_rate(filter);
1063  if (frame_rate.num > 0 && frame_rate.den > 0)
1064  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1065 
1066  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1067  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1068 
1069  if (!ost->filters_script &&
1070  !ost->filters &&
1071  next_picture &&
1072  ist &&
1073  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1074  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1075  }
1076 
1077  if (!next_picture) {
1078  //end, flushing
1079  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1080  ost->last_nb0_frames[1],
1081  ost->last_nb0_frames[2]);
1082  } else {
1083  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1084  delta = delta0 + duration;
1085 
1086  /* by default, we output a single frame */
1087  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1088  nb_frames = 1;
1089 
1090  format_video_sync = video_sync_method;
1091  if (format_video_sync == VSYNC_AUTO) {
1092  if(!strcmp(of->ctx->oformat->name, "avi")) {
1093  format_video_sync = VSYNC_VFR;
1094  } else
1095  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1096  if ( ist
1097  && format_video_sync == VSYNC_CFR
1098  && input_files[ist->file_index]->ctx->nb_streams == 1
1099  && input_files[ist->file_index]->input_ts_offset == 0) {
1100  format_video_sync = VSYNC_VSCFR;
1101  }
1102  if (format_video_sync == VSYNC_CFR && copy_ts) {
1103  format_video_sync = VSYNC_VSCFR;
1104  }
1105  }
1106  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1107 
1108  if (delta0 < 0 &&
1109  delta > 0 &&
1110  format_video_sync != VSYNC_PASSTHROUGH &&
1111  format_video_sync != VSYNC_DROP) {
1112  if (delta0 < -0.6) {
1113  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1114  } else
1115  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1116  sync_ipts = ost->sync_opts;
1117  duration += delta0;
1118  delta0 = 0;
1119  }
1120 
1121  switch (format_video_sync) {
1122  case VSYNC_VSCFR:
1123  if (ost->frame_number == 0 && delta0 >= 0.5) {
1124  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1125  delta = duration;
1126  delta0 = 0;
1127  ost->sync_opts = lrint(sync_ipts);
1128  }
1129  case VSYNC_CFR:
1130  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1131  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1132  nb_frames = 0;
1133  } else if (delta < -1.1)
1134  nb_frames = 0;
1135  else if (delta > 1.1) {
1136  nb_frames = lrintf(delta);
1137  if (delta0 > 1.1)
1138  nb0_frames = lrintf(delta0 - 0.6);
1139  }
1140  break;
1141  case VSYNC_VFR:
1142  if (delta <= -0.6)
1143  nb_frames = 0;
1144  else if (delta > 0.6)
1145  ost->sync_opts = lrint(sync_ipts);
1146  break;
1147  case VSYNC_DROP:
1148  case VSYNC_PASSTHROUGH:
1149  ost->sync_opts = lrint(sync_ipts);
1150  break;
1151  default:
1152  av_assert0(0);
1153  }
1154  }
1155 
1156  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1157  nb0_frames = FFMIN(nb0_frames, nb_frames);
1158 
1159  memmove(ost->last_nb0_frames + 1,
1160  ost->last_nb0_frames,
1161  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1162  ost->last_nb0_frames[0] = nb0_frames;
1163 
1164  if (nb0_frames == 0 && ost->last_dropped) {
1165  nb_frames_drop++;
1167  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1168  ost->frame_number, ost->st->index, ost->last_frame->pts);
1169  }
1170  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1171  if (nb_frames > dts_error_threshold * 30) {
1172  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1173  nb_frames_drop++;
1174  return;
1175  }
1176  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1177  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1178  if (nb_frames_dup > dup_warning) {
1179  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1180  dup_warning *= 10;
1181  }
1182  }
1183  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1184 
1185  /* duplicates frame if needed */
1186  for (i = 0; i < nb_frames; i++) {
1187  AVFrame *in_picture;
1188  av_init_packet(&pkt);
1189  pkt.data = NULL;
1190  pkt.size = 0;
1191 
1192  if (i < nb0_frames && ost->last_frame) {
1193  in_picture = ost->last_frame;
1194  } else
1195  in_picture = next_picture;
1196 
1197  if (!in_picture)
1198  return;
1199 
1200  in_picture->pts = ost->sync_opts;
1201 
1202 #if 1
1203  if (!check_recording_time(ost))
1204 #else
1205  if (ost->frame_number >= ost->max_frames)
1206 #endif
1207  return;
1208 
1209  {
1210  int forced_keyframe = 0;
1211  double pts_time;
1212 
1214  ost->top_field_first >= 0)
1215  in_picture->top_field_first = !!ost->top_field_first;
1216 
1217  if (in_picture->interlaced_frame) {
1218  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1219  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1220  else
1221  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1222  } else
1223  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1224 
1225  in_picture->quality = enc->global_quality;
1226  in_picture->pict_type = 0;
1227 
1228  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1229  in_picture->pts * av_q2d(enc->time_base) : NAN;
1230  if (ost->forced_kf_index < ost->forced_kf_count &&
1231  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1232  ost->forced_kf_index++;
1233  forced_keyframe = 1;
1234  } else if (ost->forced_keyframes_pexpr) {
1235  double res;
1236  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1239  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1245  res);
1246  if (res) {
1247  forced_keyframe = 1;
1253  }
1254 
1256  } else if ( ost->forced_keyframes
1257  && !strncmp(ost->forced_keyframes, "source", 6)
1258  && in_picture->key_frame==1) {
1259  forced_keyframe = 1;
1260  }
1261 
1262  if (forced_keyframe) {
1263  in_picture->pict_type = AV_PICTURE_TYPE_I;
1264  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1265  }
1266 
1268  if (debug_ts) {
1269  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1270  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1271  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1272  enc->time_base.num, enc->time_base.den);
1273  }
1274 
1275  ost->frames_encoded++;
1276 
1277  ret = avcodec_send_frame(enc, in_picture);
1278  if (ret < 0)
1279  goto error;
1280 
1281  while (1) {
1282  ret = avcodec_receive_packet(enc, &pkt);
1283  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1284  if (ret == AVERROR(EAGAIN))
1285  break;
1286  if (ret < 0)
1287  goto error;
1288 
1289  if (debug_ts) {
1290  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1291  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1292  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1293  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1294  }
1295 
1296  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1297  pkt.pts = ost->sync_opts;
1298 
1299  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1300 
1301  if (debug_ts) {
1302  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1305  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1306  }
1307 
1308  frame_size = pkt.size;
1309  output_packet(of, &pkt, ost, 0);
1310 
1311  /* if two pass, output log */
1312  if (ost->logfile && enc->stats_out) {
1313  fprintf(ost->logfile, "%s", enc->stats_out);
1314  }
1315  }
1316  }
1317  ost->sync_opts++;
1318  /*
1319  * For video, number of frames in == number of packets out.
1320  * But there may be reordering, so we can't throw away frames on encoder
1321  * flush, we need to limit them here, before they go into encoder.
1322  */
1323  ost->frame_number++;
1324 
1325  if (vstats_filename && frame_size)
1326  do_video_stats(ost, frame_size);
1327  }
1328 
1329  if (!ost->last_frame)
1330  ost->last_frame = av_frame_alloc();
1331  av_frame_unref(ost->last_frame);
1332  if (next_picture && ost->last_frame)
1333  av_frame_ref(ost->last_frame, next_picture);
1334  else
1335  av_frame_free(&ost->last_frame);
1336 
1337  return;
1338 error:
1339  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1340  exit_program(1);
1341 }
1342 
1343 static double psnr(double d)
1344 {
1345  return -10.0 * log10(d);
1346 }
1347 
/* Append one line of per-frame video statistics to the global vstats file,
 * which is opened lazily from vstats_filename on the first call.
 * NOTE(review): the signature line is missing from this view; the call site
 * above passes (ost, frame_size) — confirm against the full source. */
1349 {
1350  AVCodecContext *enc;
1351  int frame_number;
1352  double ti1, bitrate, avg_bitrate;
1353 
1354  /* this is executed just the first time do_video_stats is called */
1355  if (!vstats_file) {
1356  vstats_file = fopen(vstats_filename, "w");
1357  if (!vstats_file) {
1358  perror("fopen");
1359  exit_program(1);
1360  }
1361  }
1362 
1363  enc = ost->enc_ctx;
1364  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1365  frame_number = ost->st->nb_frames;
       /* vstats_version >= 2 additionally prints the output file and stream indexes */
1366  if (vstats_version <= 1) {
1367  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1368  ost->quality / (float)FF_QP2LAMBDA);
1369  } else {
1370  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1371  ost->quality / (float)FF_QP2LAMBDA);
1372  }
1373 
       /* PSNR is only meaningful when the encoder was asked to collect error stats */
1374  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1375  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1376 
1377  fprintf(vstats_file,"f_size= %6d ", frame_size);
1378  /* compute pts value; clamp to 0.01s to avoid division by ~zero below */
1379  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1380  if (ti1 < 0.01)
1381  ti1 = 0.01;
1382 
1383  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1384  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1385  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1386  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1387  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1388  }
1389 }
1390 
1391 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1392 
/* Mark an output stream as finished; when the owning file was opened with
 * -shortest, every stream of that file is finished at once.
 * NOTE(review): the signature line and one body line (which presumably sets
 * ost->finished) are missing from this view — confirm against the full source. */
1394 {
1395  OutputFile *of = output_files[ost->file_index];
1396  int i;
1397 
1399 
1400  if (of->shortest) {
       /* -shortest: stopping one stream stops all streams of this output file */
1401  for (i = 0; i < of->ctx->nb_streams; i++)
1402  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1403  }
1404 }
1405 
1406 /**
1407  * Get and encode new output from any of the filtergraphs, without causing
1408  * activity.
1409  *
1410  * @return 0 for success, <0 for severe errors
1411  */
1412 static int reap_filters(int flush)
1413 {
1414  AVFrame *filtered_frame = NULL;
1415  int i;
1416 
1417  /* Reap all buffers present in the buffer sinks */
1418  for (i = 0; i < nb_output_streams; i++) {
1419  OutputStream *ost = output_streams[i];
1420  OutputFile *of = output_files[ost->file_index];
1422  AVCodecContext *enc = ost->enc_ctx;
1423  int ret = 0;
1424 
       /* skip streams that have no (configured) filtergraph attached */
1425  if (!ost->filter || !ost->filter->graph->graph)
1426  continue;
1427  filter = ost->filter->filter;
1428 
       /* lazily initialize the output stream the first time data arrives */
1429  if (!ost->initialized) {
1430  char error[1024] = "";
1431  ret = init_output_stream(ost, error, sizeof(error));
1432  if (ret < 0) {
1433  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1434  ost->file_index, ost->index, error);
1435  exit_program(1);
1436  }
1437  }
1438 
       /* reuse a per-stream scratch frame for sink output */
1439  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1440  return AVERROR(ENOMEM);
1441  }
1442  filtered_frame = ost->filtered_frame;
1443 
       /* drain every frame currently buffered in this stream's sink */
1444  while (1) {
1445  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1446  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1448  if (ret < 0) {
1449  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1451  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1452  } else if (flush && ret == AVERROR_EOF) {
           /* on EOF during a flush, push a NULL frame to let fps/dup logic finish */
1454  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1455  }
1456  break;
1457  }
1458  if (ost->finished) {
1459  av_frame_unref(filtered_frame);
1460  continue;
1461  }
1462  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1463  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1464  AVRational filter_tb = av_buffersink_get_time_base(filter);
1465  AVRational tb = enc->time_base;
           /* widen the timebase denominator so float_pts keeps sub-tick precision */
1466  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1467 
1468  tb.den <<= extra_bits;
1469  float_pts =
1470  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1471  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1472  float_pts /= 1 << extra_bits;
1473  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1474  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1475 
1476  filtered_frame->pts =
1477  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1478  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1479  }
1480  //if (ost->source_index >= 0)
1481  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1482 
1483  switch (av_buffersink_get_type(filter)) {
1484  case AVMEDIA_TYPE_VIDEO:
1485  if (!ost->frame_aspect_ratio.num)
1486  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1487 
1488  if (debug_ts) {
1489  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1490  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1491  float_pts,
1492  enc->time_base.num, enc->time_base.den);
1493  }
1494 
1495  do_video_out(of, ost, filtered_frame, float_pts);
1496  break;
1497  case AVMEDIA_TYPE_AUDIO:
1498  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1499  enc->channels != filtered_frame->channels) {
1501  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1502  break;
1503  }
1504  do_audio_out(of, ost, filtered_frame);
1505  break;
1506  default:
1507  // TODO support subtitle filters
1508  av_assert0(0);
1509  }
1510 
1511  av_frame_unref(filtered_frame);
1512  }
1513  }
1514 
1515  return 0;
1516 }
1517 
/* Print the end-of-run summary: per-type output byte totals and muxing
 * overhead at INFO level, then per-input/per-output stream packet and
 * frame counters at VERBOSE level. Warns when nothing was encoded at all. */
1518 static void print_final_stats(int64_t total_size)
1519 {
1520  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1521  uint64_t subtitle_size = 0;
1522  uint64_t data_size = 0;
1523  float percent = -1.0;
1524  int i, j;
1525  int pass1_used = 1;
1526 
      /* accumulate written bytes per media type across all output streams */
1527  for (i = 0; i < nb_output_streams; i++) {
1528  OutputStream *ost = output_streams[i];
1529  switch (ost->enc_ctx->codec_type) {
1530  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1531  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1532  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1533  default: other_size += ost->data_size; break;
1534  }
1535  extra_size += ost->enc_ctx->extradata_size;
1536  data_size += ost->data_size;
      /* NOTE(review): the condition guarding this (a PASS1 flag check) is
       * missing from this view — confirm against the full source */
1539  pass1_used = 0;
1540  }
1541 
      /* muxing overhead = container bytes beyond the raw stream payload */
1542  if (data_size && total_size>0 && total_size >= data_size)
1543  percent = 100.0 * (total_size - data_size) / data_size;
1544 
1545  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1546  video_size / 1024.0,
1547  audio_size / 1024.0,
1548  subtitle_size / 1024.0,
1549  other_size / 1024.0,
1550  extra_size / 1024.0);
1551  if (percent >= 0.0)
1552  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1553  else
1554  av_log(NULL, AV_LOG_INFO, "unknown");
1555  av_log(NULL, AV_LOG_INFO, "\n");
1556 
1557  /* print verbose per-stream stats */
1558  for (i = 0; i < nb_input_files; i++) {
1559  InputFile *f = input_files[i];
1560  uint64_t total_packets = 0, total_size = 0;
1561 
1562  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1563  i, f->ctx->filename);
1564 
1565  for (j = 0; j < f->nb_streams; j++) {
1566  InputStream *ist = input_streams[f->ist_index + j];
1567  enum AVMediaType type = ist->dec_ctx->codec_type;
1568 
1569  total_size += ist->data_size;
1570  total_packets += ist->nb_packets;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1573  i, j, media_type_string(type));
1574  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1575  ist->nb_packets, ist->data_size);
1576 
1577  if (ist->decoding_needed) {
1578  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1579  ist->frames_decoded);
1580  if (type == AVMEDIA_TYPE_AUDIO)
1581  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1582  av_log(NULL, AV_LOG_VERBOSE, "; ");
1583  }
1584 
1585  av_log(NULL, AV_LOG_VERBOSE, "\n");
1586  }
1587 
1588  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1589  total_packets, total_size);
1590  }
1591 
1592  for (i = 0; i < nb_output_files; i++) {
1593  OutputFile *of = output_files[i];
1594  uint64_t total_packets = 0, total_size = 0;
1595 
1596  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1597  i, of->ctx->filename);
1598 
1599  for (j = 0; j < of->ctx->nb_streams; j++) {
1600  OutputStream *ost = output_streams[of->ost_index + j];
1601  enum AVMediaType type = ost->enc_ctx->codec_type;
1602 
1603  total_size += ost->data_size;
1604  total_packets += ost->packets_written;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1607  i, j, media_type_string(type));
1608  if (ost->encoding_needed) {
1609  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1610  ost->frames_encoded);
1611  if (type == AVMEDIA_TYPE_AUDIO)
1612  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1613  av_log(NULL, AV_LOG_VERBOSE, "; ");
1614  }
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1617  ost->packets_written, ost->data_size);
1618 
1619  av_log(NULL, AV_LOG_VERBOSE, "\n");
1620  }
1621 
1622  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1623  total_packets, total_size);
1624  }
      /* no payload bytes at all: likely -ss/-t/-frames trimmed everything away,
       * unless this was a deliberate first encoding pass */
1625  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1626  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1627  if (pass1_used) {
1628  av_log(NULL, AV_LOG_WARNING, "\n");
1629  } else {
1630  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1631  }
1632  }
1633 }
1634 
/* Emit the periodic (at most every 500ms) or final progress line:
 * frame count, fps, q, size, time, bitrate, dup/drop and speed, to stderr
 * and/or the -progress AVIO stream. On the last report, also closes the
 * progress stream and prints the final statistics. */
1635 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1636 {
1637  char buf[1024];
1638  AVBPrint buf_script;
1639  OutputStream *ost;
1640  AVFormatContext *oc;
1641  int64_t total_size;
1642  AVCodecContext *enc;
1643  int frame_number, vid, i;
1644  double bitrate;
1645  double speed;
1646  int64_t pts = INT64_MIN + 1;
1647  static int64_t last_time = -1;
1648  static int qp_histogram[52];
1649  int hours, mins, secs, us;
1650  int ret;
1651  float t;
1652 
1653  if (!print_stats && !is_last_report && !progress_avio)
1654  return;
1655 
      /* rate-limit intermediate reports to one per 500ms of wall clock */
1656  if (!is_last_report) {
1657  if (last_time == -1) {
1658  last_time = cur_time;
1659  return;
1660  }
1661  if ((cur_time - last_time) < 500000)
1662  return;
1663  last_time = cur_time;
1664  }
1665 
1666  t = (cur_time-timer_start) / 1000000.0;
1667 
1668 
1669  oc = output_files[0]->ctx;
1670 
1671  total_size = avio_size(oc->pb);
1672  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1673  total_size = avio_tell(oc->pb);
1674 
1675  buf[0] = '\0';
1676  vid = 0;
1677  av_bprint_init(&buf_script, 0, 1);
1678  for (i = 0; i < nb_output_streams; i++) {
1679  float q = -1;
1680  ost = output_streams[i];
1681  enc = ost->enc_ctx;
1682  if (!ost->stream_copy)
1683  q = ost->quality / (float) FF_QP2LAMBDA;
1684 
      /* only the first video stream gets the full frame/fps block ("vid" latch);
       * subsequent video streams just append their q value */
1685  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1687  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1688  ost->file_index, ost->index, q);
1689  }
1690  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1691  float fps;
1692 
1693  frame_number = ost->frame_number;
1694  fps = t > 1 ? frame_number / t : 0;
1695  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1696  frame_number, fps < 9.95, fps, q);
1697  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1698  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1699  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700  ost->file_index, ost->index, q);
1701  if (is_last_report)
1702  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1703  if (qp_hist) {
1704  int j;
1705  int qp = lrintf(q);
1706  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1707  qp_histogram[qp]++;
1708  for (j = 0; j < 32; j++)
1709  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1710  }
1711 
1712  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1713  int j;
1714  double error, error_sum = 0;
1715  double scale, scale_sum = 0;
1716  double p;
1717  char type[3] = { 'Y','U','V' };
1718  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1719  for (j = 0; j < 3; j++) {
1720  if (is_last_report) {
1721  error = enc->error[j];
1722  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1723  } else {
1724  error = ost->error[j];
1725  scale = enc->width * enc->height * 255.0 * 255.0;
1726  }
          /* chroma planes are subsampled, so their peak scale is a quarter */
1727  if (j)
1728  scale /= 4;
1729  error_sum += error;
1730  scale_sum += scale;
1731  p = psnr(error / scale);
1732  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1733  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1734  ost->file_index, ost->index, type[j] | 32, p);
1735  }
1736  p = psnr(error_sum / scale_sum);
1737  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1738  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1739  ost->file_index, ost->index, p);
1740  }
1741  vid = 1;
1742  }
1743  /* compute min output value */
1745  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1746  ost->st->time_base, AV_TIME_BASE_Q));
1747  if (is_last_report)
1748  nb_frames_drop += ost->last_dropped;
1749  }
1750 
1751  secs = FFABS(pts) / AV_TIME_BASE;
1752  us = FFABS(pts) % AV_TIME_BASE;
1753  mins = secs / 60;
1754  secs %= 60;
1755  hours = mins / 60;
1756  mins %= 60;
1757 
1758  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1759  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1760 
1761  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1762  "size=N/A time=");
1763  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1764  "size=%8.0fkB time=", total_size / 1024.0);
1765  if (pts < 0)
1766  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1767  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1768  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1769  (100 * us) / AV_TIME_BASE);
1770 
1771  if (bitrate < 0) {
1772  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1773  av_bprintf(&buf_script, "bitrate=N/A\n");
1774  }else{
1775  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1776  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1777  }
1778 
1779  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1780  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1781  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1782  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1783  hours, mins, secs, us);
1784 
      /* NOTE(review): the condition and argument lines around this snprintf
       * (dup/drop counts) are missing from this view — confirm against the
       * full source before editing */
1786  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1788  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1789  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1790 
1791  if (speed < 0) {
1792  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1793  av_bprintf(&buf_script, "speed=N/A\n");
1794  } else {
1795  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1796  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1797  }
1798 
1799  if (print_stats || is_last_report) {
      /* carriage return keeps intermediate reports on one console line */
1800  const char end = is_last_report ? '\n' : '\r';
1801  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1802  fprintf(stderr, "%s %c", buf, end);
1803  } else
1804  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1805 
1806  fflush(stderr);
1807  }
1808 
1809  if (progress_avio) {
1810  av_bprintf(&buf_script, "progress=%s\n",
1811  is_last_report ? "end" : "continue");
1812  avio_write(progress_avio, buf_script.str,
1813  FFMIN(buf_script.len, buf_script.size - 1));
1814  avio_flush(progress_avio);
1815  av_bprint_finalize(&buf_script, NULL);
1816  if (is_last_report) {
1817  if ((ret = avio_closep(&progress_avio)) < 0)
1819  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1820  }
1821  }
1822 
1823  if (is_last_report)
1824  print_final_stats(total_size);
1825 }
1826 
/* Drain every encoder at end of stream: initialize any output stream that
 * never received data (giving its filters fake parameters from libavformat),
 * then send a NULL frame to each encoder and mux the remaining packets
 * until AVERROR_EOF. */
1827 static void flush_encoders(void)
1828 {
1829  int i, ret;
1830 
1831  for (i = 0; i < nb_output_streams; i++) {
1832  OutputStream *ost = output_streams[i];
1833  AVCodecContext *enc = ost->enc_ctx;
1834  OutputFile *of = output_files[ost->file_index];
1835 
1836  if (!ost->encoding_needed)
1837  continue;
1838 
1839  // Try to enable encoding with no input frames.
1840  // Maybe we should just let encoding fail instead.
1841  if (!ost->initialized) {
1842  FilterGraph *fg = ost->filter->graph;
1843  char error[1024] = "";
1844 
1846  "Finishing stream %d:%d without any data written to it.\n",
1847  ost->file_index, ost->st->index);
1848 
1849  if (ost->filter && !fg->graph) {
1850  int x;
1851  for (x = 0; x < fg->nb_inputs; x++) {
1852  InputFilter *ifilter = fg->inputs[x];
1853  if (ifilter->format < 0) {
1854  AVCodecParameters *par = ifilter->ist->st->codecpar;
1855  // We never got any input. Set a fake format, which will
1856  // come from libavformat.
1857  ifilter->format = par->format;
1858  ifilter->sample_rate = par->sample_rate;
1859  ifilter->channels = par->channels;
1860  ifilter->channel_layout = par->channel_layout;
1861  ifilter->width = par->width;
1862  ifilter->height = par->height;
1863  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1864  }
1865  }
1866 
       /* NOTE(review): the condition guarding this continue (an input-format
        * check) is missing from this view — confirm against the full source */
1868  continue;
1869 
1870  ret = configure_filtergraph(fg);
1871  if (ret < 0) {
1872  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1873  exit_program(1);
1874  }
1875 
1876  finish_output_stream(ost);
1877  }
1878 
1879  ret = init_output_stream(ost, error, sizeof(error));
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1882  ost->file_index, ost->index, error);
1883  exit_program(1);
1884  }
1885  }
1886 
      /* raw/PCM-style audio (frame_size <= 1) buffers nothing, so no flush needed */
1887  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1888  continue;
1889 
1891  continue;
1892 
1893  for (;;) {
1894  const char *desc = NULL;
1895  AVPacket pkt;
1896  int pkt_size;
1897 
1898  switch (enc->codec_type) {
1899  case AVMEDIA_TYPE_AUDIO:
1900  desc = "audio";
1901  break;
1902  case AVMEDIA_TYPE_VIDEO:
1903  desc = "video";
1904  break;
1905  default:
1906  av_assert0(0);
1907  }
1908 
1909  av_init_packet(&pkt);
1910  pkt.data = NULL;
1911  pkt.size = 0;
1912 
1914 
       /* EAGAIN from receive means the encoder wants input: feed the NULL
        * (flush) frame, then retry receiving */
1915  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1916  ret = avcodec_send_frame(enc, NULL);
1917  if (ret < 0) {
1918  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1919  desc,
1920  av_err2str(ret));
1921  exit_program(1);
1922  }
1923  }
1924 
1925  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1926  if (ret < 0 && ret != AVERROR_EOF) {
1927  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1928  desc,
1929  av_err2str(ret));
1930  exit_program(1);
1931  }
1932  if (ost->logfile && enc->stats_out) {
1933  fprintf(ost->logfile, "%s", enc->stats_out);
1934  }
       /* EOF: tell the muxer this stream is done, then move to the next one */
1935  if (ret == AVERROR_EOF) {
1936  output_packet(of, &pkt, ost, 1);
1937  break;
1938  }
1939  if (ost->finished & MUXER_FINISHED) {
1940  av_packet_unref(&pkt);
1941  continue;
1942  }
1943  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1944  pkt_size = pkt.size;
1945  output_packet(of, &pkt, ost, 0);
1947  do_video_stats(ost, pkt_size);
1948  }
1949  }
1950  }
1951 }
1952 
1953 /*
1954  * Check whether a packet from ist should be written into ost at this time:
 * the output stream must be fed by this exact input stream, must not be
 * finished, and the input timestamp must have reached the output file's
 * -ss start time. Returns 1 to write, 0 to skip.
 * NOTE(review): the signature line is missing from this view — it takes
 * (InputStream *ist, OutputStream *ost); confirm against the full source.
1955  */
1957 {
1958  OutputFile *of = output_files[ost->file_index];
1959  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1960 
1961  if (ost->source_index != ist_index)
1962  return 0;
1963 
1964  if (ost->finished)
1965  return 0;
1966 
1967  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1968  return 0;
1969 
1970  return 1;
1971 }
1972 
/* Copy one input packet to an output stream without re-encoding: apply
 * start-time/recording-time trimming, rescale pts/dts/duration into the
 * muxer timebase, optionally run the parser to fix up the bitstream, and
 * hand the packet to output_packet(). A NULL pkt flushes the output
 * bitstream filters. */
1973 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1974 {
1975  OutputFile *of = output_files[ost->file_index];
1976  InputFile *f = input_files [ist->file_index];
1977  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1978  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1979  AVPacket opkt = { 0 };
1980 
1981  av_init_packet(&opkt);
1982 
1983  // EOF: flush output bitstream filters.
1984  if (!pkt) {
1985  output_packet(of, &opkt, ost, 1);
1986  return;
1987  }
1988 
      /* NOTE(review): the second half of this condition is missing from this
       * view — it skips non-keyframe packets before the first written frame;
       * confirm against the full source */
1989  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1991  return;
1992 
      /* drop packets that predate the requested output start time */
1993  if (!ost->frame_number && !ost->copy_prior_start) {
1994  int64_t comp_start = start_time;
1995  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1996  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1997  if (pkt->pts == AV_NOPTS_VALUE ?
1998  ist->pts < comp_start :
1999  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2000  return;
2001  }
2002 
      /* stop this stream once the output file's -t duration is reached */
2003  if (of->recording_time != INT64_MAX &&
2004  ist->pts >= of->recording_time + start_time) {
2005  close_output_stream(ost);
2006  return;
2007  }
2008 
2009  if (f->recording_time != INT64_MAX) {
2010  start_time = f->ctx->start_time;
2011  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2012  start_time += f->start_time;
2013  if (ist->pts >= f->recording_time + start_time) {
2014  close_output_stream(ost);
2015  return;
2016  }
2017  }
2018 
2019  /* force the input stream PTS */
2020  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2021  ost->sync_opts++;
2022 
2023  if (pkt->pts != AV_NOPTS_VALUE)
2024  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2025  else
2026  opkt.pts = AV_NOPTS_VALUE;
2027 
2028  if (pkt->dts == AV_NOPTS_VALUE)
2029  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2030  else
2031  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2032  opkt.dts -= ost_tb_start_time;
2033 
      /* audio: rescale with av_rescale_delta to avoid accumulating rounding
       * error over many small packets */
2034  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2036  if(!duration)
2037  duration = ist->dec_ctx->frame_size;
2038  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2040  ost->mux_timebase) - ost_tb_start_time;
2041  }
2042 
2043  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2044 
2045  opkt.flags = pkt->flags;
2046  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2047  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2048  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2049  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2050  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2051  ) {
2052  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2053  &opkt.data, &opkt.size,
2054  pkt->data, pkt->size,
2056  if (ret < 0) {
2057  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2058  av_err2str(ret));
2059  exit_program(1);
2060  }
       /* ret > 0 means the parser allocated new data, so wrap it in a buf
        * that owns (and later frees) that allocation */
2061  if (ret) {
2062  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2063  if (!opkt.buf)
2064  exit_program(1);
2065  }
2066  } else {
2067  opkt.data = pkt->data;
2068  opkt.size = pkt->size;
2069  }
2070  av_copy_packet_side_data(&opkt, pkt);
2071 
2072  output_packet(of, &opkt, ost, 0);
2073 }
2074 
/* If the decoder reported no channel layout, try to pick a default one for
 * its channel count (bounded by -guess_layout_max) and warn about the guess.
 * Returns 1 when a usable layout is (now) set, 0 when it could not be guessed.
 * NOTE(review): the signature line and the line performing the actual guess
 * (presumably an av_get_default_channel_layout() assignment) are missing
 * from this view — confirm against the full source. */
2076 {
2077  AVCodecContext *dec = ist->dec_ctx;
2078 
2079  if (!dec->channel_layout) {
2080  char layout_name[256];
2081 
       /* refuse to guess for very high channel counts (user-configurable cap) */
2082  if (dec->channels > ist->guess_layout_max)
2083  return 0;
2085  if (!dec->channel_layout)
2086  return 0;
2087  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2088  dec->channels, dec->channel_layout);
2089  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2090  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2091  }
2092  return 1;
2093 }
2094 
/* Track decode success/failure counters and, with -xerror, abort the program
 * on a decode error or (for a decoded frame) on corruption.
 * NOTE(review): an inner condition line (presumably testing the decoded
 * frame's corruption flags) is missing from this view — the extra closing
 * brace below belongs to it; confirm against the full source. */
2095 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2096 {
      /* decode_error_stat[0] counts successes, [1] counts failures */
2097  if (*got_output || ret<0)
2098  decode_error_stat[ret<0] ++;
2099 
2100  if (ret < 0 && exit_on_error)
2101  exit_program(1);
2102 
2103  if (exit_on_error && *got_output && ist) {
2105  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2106  exit_program(1);
2107  }
2108  }
2109 }
2110 
2111 // Filters can be configured only if the formats of all inputs are known.
// Returns 1 when every audio/video input of the filtergraph has a known
// format (format >= 0), 0 otherwise.
// NOTE(review): the signature line is missing from this view — it takes a
// FilterGraph *fg; confirm against the full source.
2113 {
2114  int i;
2115  for (i = 0; i < fg->nb_inputs; i++) {
2116  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2117  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2118  return 0;
2119  }
2120  return 1;
2121 }
2122 
/* Feed one decoded frame into an input filter. If the frame's parameters
 * (format, dimensions, sample rate/layout, hw frames context) differ from
 * what the filtergraph was configured with, the graph is reconfigured; if
 * not all sibling inputs know their format yet, the frame is queued in the
 * input's FIFO instead. Returns 0 on success or a negative AVERROR.
 * NOTE(review): the signature line and the av_buffersrc submission call near
 * the end are missing from this view — confirm against the full source. */
2124 {
2125  FilterGraph *fg = ifilter->graph;
2126  int need_reinit, ret, i;
2127 
2128  /* determine if the parameters for this input changed */
2129  need_reinit = ifilter->format != frame->format;
2130  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2131  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2132  need_reinit = 1;
2133 
2134  switch (ifilter->ist->st->codecpar->codec_type) {
2135  case AVMEDIA_TYPE_AUDIO:
2136  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2137  ifilter->channels != frame->channels ||
2138  ifilter->channel_layout != frame->channel_layout;
2139  break;
2140  case AVMEDIA_TYPE_VIDEO:
2141  need_reinit |= ifilter->width != frame->width ||
2142  ifilter->height != frame->height;
2143  break;
2144  }
2145 
2146  if (need_reinit) {
2147  ret = ifilter_parameters_from_frame(ifilter, frame);
2148  if (ret < 0)
2149  return ret;
2150  }
2151 
2152  /* (re)init the graph if possible, otherwise buffer the frame and return */
2153  if (need_reinit || !fg->graph) {
2154  for (i = 0; i < fg->nb_inputs; i++) {
2155  if (!ifilter_has_all_input_formats(fg)) {
        /* clone the frame into the FIFO — the caller keeps ownership of
         * 'frame', which is unreferenced here */
2156  AVFrame *tmp = av_frame_clone(frame);
2157  if (!tmp)
2158  return AVERROR(ENOMEM);
2159  av_frame_unref(frame);
2160 
2161  if (!av_fifo_space(ifilter->frame_queue)) {
2162  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2163  if (ret < 0) {
2164  av_frame_free(&tmp);
2165  return ret;
2166  }
2167  }
2168  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2169  return 0;
2170  }
2171  }
2172 
       /* drain the old graph's pending output before tearing it down */
2173  ret = reap_filters(1);
2174  if (ret < 0 && ret != AVERROR_EOF) {
2175  char errbuf[128];
2176  av_strerror(ret, errbuf, sizeof(errbuf));
2177 
2178  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2179  return ret;
2180  }
2181 
2182  ret = configure_filtergraph(fg);
2183  if (ret < 0) {
2184  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2185  return ret;
2186  }
2187  }
2188 
2190  if (ret < 0) {
2191  if (ret != AVERROR_EOF)
2192  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2193  return ret;
2194  }
2195 
2196  return 0;
2197 }
2198 
2199 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2200 {
2201  int i, j, ret;
2202 
2203  ifilter->eof = 1;
2204 
2205  if (ifilter->filter) {
2206  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2207  if (ret < 0)
2208  return ret;
2209  } else {
2210  // the filtergraph was never configured
2211  FilterGraph *fg = ifilter->graph;
2212  for (i = 0; i < fg->nb_inputs; i++)
2213  if (!fg->inputs[i]->eof)
2214  break;
2215  if (i == fg->nb_inputs) {
2216  // All the input streams have finished without the filtergraph
2217  // ever being configured.
2218  // Mark the output streams as finished.
2219  for (j = 0; j < fg->nb_outputs; j++)
2220  finish_output_stream(fg->outputs[j]->ost);
2221  }
2222  }
2223 
2224  return 0;
2225 }
2226 
2227 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2228 // There is the following difference: if you got a frame, you must call
2229 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2230 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2231 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2232 {
2233  int ret;
2234 
2235  *got_frame = 0;
2236 
2237  if (pkt) {
2238  ret = avcodec_send_packet(avctx, pkt);
2239  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2240  // decoded frames with avcodec_receive_frame() until done.
2241  if (ret < 0 && ret != AVERROR_EOF)
2242  return ret;
2243  }
2244 
2245  ret = avcodec_receive_frame(avctx, frame);
2246  if (ret < 0 && ret != AVERROR(EAGAIN))
2247  return ret;
2248  if (ret >= 0)
2249  *got_frame = 1;
2250 
2251  return 0;
2252 }
2253 
2254 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2255 {
2256  int i, ret;
2257  AVFrame *f;
2258 
2259  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2260  for (i = 0; i < ist->nb_filters; i++) {
2261  if (i < ist->nb_filters - 1) {
2262  f = ist->filter_frame;
2263  ret = av_frame_ref(f, decoded_frame);
2264  if (ret < 0)
2265  break;
2266  } else
2267  f = decoded_frame;
2268  ret = ifilter_send_frame(ist->filters[i], f);
2269  if (ret == AVERROR_EOF)
2270  ret = 0; /* ignore */
2271  if (ret < 0) {
2273  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2274  break;
2275  }
2276  }
2277  return ret;
2278 }
2279 
2280 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2281  int *decode_failed)
2282 {
2283  AVFrame *decoded_frame;
2284  AVCodecContext *avctx = ist->dec_ctx;
2285  int ret, err = 0;
2286  AVRational decoded_frame_tb;
2287 
2288  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2289  return AVERROR(ENOMEM);
2290  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2291  return AVERROR(ENOMEM);
2292  decoded_frame = ist->decoded_frame;
2293 
2295  ret = decode(avctx, decoded_frame, got_output, pkt);
2296  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2297  if (ret < 0)
2298  *decode_failed = 1;
2299 
2300  if (ret >= 0 && avctx->sample_rate <= 0) {
2301  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2302  ret = AVERROR_INVALIDDATA;
2303  }
2304 
2305  if (ret != AVERROR_EOF)
2306  check_decode_result(ist, got_output, ret);
2307 
2308  if (!*got_output || ret < 0)
2309  return ret;
2310 
2311  ist->samples_decoded += decoded_frame->nb_samples;
2312  ist->frames_decoded++;
2313 
2314 #if 1
2315  /* increment next_dts to use for the case where the input stream does not
2316  have timestamps or there are multiple frames in the packet */
2317  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2318  avctx->sample_rate;
2319  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320  avctx->sample_rate;
2321 #endif
2322 
2323  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324  decoded_frame_tb = ist->st->time_base;
2325  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326  decoded_frame->pts = pkt->pts;
2327  decoded_frame_tb = ist->st->time_base;
2328  }else {
2329  decoded_frame->pts = ist->dts;
2330  decoded_frame_tb = AV_TIME_BASE_Q;
2331  }
2332  if (decoded_frame->pts != AV_NOPTS_VALUE)
2333  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335  (AVRational){1, avctx->sample_rate});
2336  ist->nb_samples = decoded_frame->nb_samples;
2337  err = send_frame_to_filters(ist, decoded_frame);
2338 
2339  av_frame_unref(ist->filter_frame);
2340  av_frame_unref(decoded_frame);
2341  return err < 0 ? err : ret;
2342 }
2343 
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2345  int *decode_failed)
2346 {
2347  AVFrame *decoded_frame;
2348  int i, ret = 0, err = 0;
2349  int64_t best_effort_timestamp;
2350  int64_t dts = AV_NOPTS_VALUE;
2351  AVPacket avpkt;
2352 
2353  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2355  // skip the packet.
2356  if (!eof && pkt && pkt->size == 0)
2357  return 0;
2358 
2359  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360  return AVERROR(ENOMEM);
2361  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362  return AVERROR(ENOMEM);
2363  decoded_frame = ist->decoded_frame;
2364  if (ist->dts != AV_NOPTS_VALUE)
2365  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2366  if (pkt) {
2367  avpkt = *pkt;
2368  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2369  }
2370 
2371  // The old code used to set dts on the drain packet, which does not work
2372  // with the new API anymore.
2373  if (eof) {
2374  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2375  if (!new)
2376  return AVERROR(ENOMEM);
2377  ist->dts_buffer = new;
2378  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2379  }
2380 
2382  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2384  if (ret < 0)
2385  *decode_failed = 1;
2386 
2387  // The following line may be required in some cases where there is no parser
2388  // or the parser does not has_b_frames correctly
2389  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2392  } else
2394  "video_delay is larger in decoder than demuxer %d > %d.\n"
2395  "If you want to help, upload a sample "
2396  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398  ist->dec_ctx->has_b_frames,
2399  ist->st->codecpar->video_delay);
2400  }
2401 
2402  if (ret != AVERROR_EOF)
2403  check_decode_result(ist, got_output, ret);
2404 
2405  if (*got_output && ret >= 0) {
2406  if (ist->dec_ctx->width != decoded_frame->width ||
2407  ist->dec_ctx->height != decoded_frame->height ||
2408  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410  decoded_frame->width,
2411  decoded_frame->height,
2412  decoded_frame->format,
2413  ist->dec_ctx->width,
2414  ist->dec_ctx->height,
2415  ist->dec_ctx->pix_fmt);
2416  }
2417  }
2418 
2419  if (!*got_output || ret < 0)
2420  return ret;
2421 
2422  if(ist->top_field_first>=0)
2423  decoded_frame->top_field_first = ist->top_field_first;
2424 
2425  ist->frames_decoded++;
2426 
2427  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2429  if (err < 0)
2430  goto fail;
2431  }
2432  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2433 
2434  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435  *duration_pts = decoded_frame->pkt_duration;
2436 
2437  if (ist->framerate.num)
2438  best_effort_timestamp = ist->cfr_next_pts++;
2439 
2440  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441  best_effort_timestamp = ist->dts_buffer[0];
2442 
2443  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445  ist->nb_dts_buffer--;
2446  }
2447 
2448  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2450 
2451  if (ts != AV_NOPTS_VALUE)
2452  ist->next_pts = ist->pts = ts;
2453  }
2454 
2455  if (debug_ts) {
2456  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458  ist->st->index, av_ts2str(decoded_frame->pts),
2459  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460  best_effort_timestamp,
2461  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462  decoded_frame->key_frame, decoded_frame->pict_type,
2463  ist->st->time_base.num, ist->st->time_base.den);
2464  }
2465 
2466  if (ist->st->sample_aspect_ratio.num)
2467  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2468 
2469  err = send_frame_to_filters(ist, decoded_frame);
2470 
2471 fail:
2473  av_frame_unref(decoded_frame);
2474  return err < 0 ? err : ret;
2475 }
2476 
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2478  int *decode_failed)
2479 {
2480  AVSubtitle subtitle;
2481  int free_sub = 1;
2482  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483  &subtitle, got_output, pkt);
2484 
2485  check_decode_result(NULL, got_output, ret);
2486 
2487  if (ret < 0 || !*got_output) {
2488  *decode_failed = 1;
2489  if (!pkt->size)
2490  sub2video_flush(ist);
2491  return ret;
2492  }
2493 
2494  if (ist->fix_sub_duration) {
2495  int end = 1;
2496  if (ist->prev_sub.got_output) {
2497  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498  1000, AV_TIME_BASE);
2499  if (end < ist->prev_sub.subtitle.end_display_time) {
2500  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2503  end <= 0 ? ", dropping it" : "");
2505  }
2506  }
2507  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508  FFSWAP(int, ret, ist->prev_sub.ret);
2509  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2510  if (end <= 0)
2511  goto out;
2512  }
2513 
2514  if (!*got_output)
2515  return ret;
2516 
2517  if (ist->sub2video.frame) {
2518  sub2video_update(ist, &subtitle);
2519  } else if (ist->nb_filters) {
2520  if (!ist->sub2video.sub_queue)
2521  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522  if (!ist->sub2video.sub_queue)
2523  exit_program(1);
2524  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2526  if (ret < 0)
2527  exit_program(1);
2528  }
2529  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2530  free_sub = 0;
2531  }
2532 
2533  if (!subtitle.num_rects)
2534  goto out;
2535 
2536  ist->frames_decoded++;
2537 
2538  for (i = 0; i < nb_output_streams; i++) {
2539  OutputStream *ost = output_streams[i];
2540 
2541  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2543  continue;
2544 
2545  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2546  }
2547 
2548 out:
2549  if (free_sub)
2550  avsubtitle_free(&subtitle);
2551  return ret;
2552 }
2553 
2555 {
2556  int i, ret;
2557  /* TODO keep pts also in stream time base to avoid converting back */
2558  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2560 
2561  for (i = 0; i < ist->nb_filters; i++) {
2562  ret = ifilter_send_eof(ist->filters[i], pts);
2563  if (ret < 0)
2564  return ret;
2565  }
2566  return 0;
2567 }
2568 
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2571 {
2572  int ret = 0, i;
2573  int repeating = 0;
2574  int eof_reached = 0;
2575 
2576  AVPacket avpkt;
2577  if (!ist->saw_first_ts) {
2578  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2579  ist->pts = 0;
2580  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2583  }
2584  ist->saw_first_ts = 1;
2585  }
2586 
2587  if (ist->next_dts == AV_NOPTS_VALUE)
2588  ist->next_dts = ist->dts;
2589  if (ist->next_pts == AV_NOPTS_VALUE)
2590  ist->next_pts = ist->pts;
2591 
2592  if (!pkt) {
2593  /* EOF handling */
2594  av_init_packet(&avpkt);
2595  avpkt.data = NULL;
2596  avpkt.size = 0;
2597  } else {
2598  avpkt = *pkt;
2599  }
2600 
2601  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604  ist->next_pts = ist->pts = ist->dts;
2605  }
2606 
2607  // while we have more to decode or while the decoder did output something on EOF
2608  while (ist->decoding_needed) {
2609  int64_t duration_dts = 0;
2610  int64_t duration_pts = 0;
2611  int got_output = 0;
2612  int decode_failed = 0;
2613 
2614  ist->pts = ist->next_pts;
2615  ist->dts = ist->next_dts;
2616 
2617  switch (ist->dec_ctx->codec_type) {
2618  case AVMEDIA_TYPE_AUDIO:
2619  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2620  &decode_failed);
2621  break;
2622  case AVMEDIA_TYPE_VIDEO:
2623  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2624  &decode_failed);
2625  if (!repeating || !pkt || got_output) {
2626  if (pkt && pkt->duration) {
2627  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2630  duration_dts = ((int64_t)AV_TIME_BASE *
2631  ist->dec_ctx->framerate.den * ticks) /
2633  }
2634 
2635  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636  ist->next_dts += duration_dts;
2637  }else
2638  ist->next_dts = AV_NOPTS_VALUE;
2639  }
2640 
2641  if (got_output) {
2642  if (duration_pts > 0) {
2643  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2644  } else {
2645  ist->next_pts += duration_dts;
2646  }
2647  }
2648  break;
2649  case AVMEDIA_TYPE_SUBTITLE:
2650  if (repeating)
2651  break;
2652  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653  if (!pkt && ret >= 0)
2654  ret = AVERROR_EOF;
2655  break;
2656  default:
2657  return -1;
2658  }
2659 
2660  if (ret == AVERROR_EOF) {
2661  eof_reached = 1;
2662  break;
2663  }
2664 
2665  if (ret < 0) {
2666  if (decode_failed) {
2667  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668  ist->file_index, ist->st->index, av_err2str(ret));
2669  } else {
2670  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2672  }
2673  if (!decode_failed || exit_on_error)
2674  exit_program(1);
2675  break;
2676  }
2677 
2678  if (got_output)
2679  ist->got_output = 1;
2680 
2681  if (!got_output)
2682  break;
2683 
2684  // During draining, we might get multiple output frames in this loop.
2685  // ffmpeg.c does not drain the filter chain on configuration changes,
2686  // which means if we send multiple frames at once to the filters, and
2687  // one of those frames changes configuration, the buffered frames will
2688  // be lost. This can upset certain FATE tests.
2689  // Decode only 1 frame per call on EOF to appease these FATE tests.
2690  // The ideal solution would be to rewrite decoding to use the new
2691  // decoding API in a better way.
2692  if (!pkt)
2693  break;
2694 
2695  repeating = 1;
2696  }
2697 
2698  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699  /* except when looping we need to flush but not to send an EOF */
2700  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701  int ret = send_filter_eof(ist);
2702  if (ret < 0) {
2703  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2704  exit_program(1);
2705  }
2706  }
2707 
2708  /* handle stream copy */
2709  if (!ist->decoding_needed && pkt) {
2710  ist->dts = ist->next_dts;
2711  switch (ist->dec_ctx->codec_type) {
2712  case AVMEDIA_TYPE_AUDIO:
2713  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2714  ist->dec_ctx->sample_rate;
2715  break;
2716  case AVMEDIA_TYPE_VIDEO:
2717  if (ist->framerate.num) {
2718  // TODO: Remove work-around for c99-to-c89 issue 7
2719  AVRational time_base_q = AV_TIME_BASE_Q;
2720  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2721  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2722  } else if (pkt->duration) {
2723  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2724  } else if(ist->dec_ctx->framerate.num != 0) {
2725  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2726  ist->next_dts += ((int64_t)AV_TIME_BASE *
2727  ist->dec_ctx->framerate.den * ticks) /
2729  }
2730  break;
2731  }
2732  ist->pts = ist->dts;
2733  ist->next_pts = ist->next_dts;
2734  }
2735  for (i = 0; i < nb_output_streams; i++) {
2736  OutputStream *ost = output_streams[i];
2737 
2738  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2739  continue;
2740 
2741  do_streamcopy(ist, ost, pkt);
2742  }
2743 
2744  return !eof_reached;
2745 }
2746 
2747 static void print_sdp(void)
2748 {
2749  char sdp[16384];
2750  int i;
2751  int j;
2752  AVIOContext *sdp_pb;
2753  AVFormatContext **avc;
2754 
2755  for (i = 0; i < nb_output_files; i++) {
2756  if (!output_files[i]->header_written)
2757  return;
2758  }
2759 
2760  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2761  if (!avc)
2762  exit_program(1);
2763  for (i = 0, j = 0; i < nb_output_files; i++) {
2764  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2765  avc[j] = output_files[i]->ctx;
2766  j++;
2767  }
2768  }
2769 
2770  if (!j)
2771  goto fail;
2772 
2773  av_sdp_create(avc, j, sdp, sizeof(sdp));
2774 
2775  if (!sdp_filename) {
2776  printf("SDP:\n%s\n", sdp);
2777  fflush(stdout);
2778  } else {
2779  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2780  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2781  } else {
2782  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2783  avio_closep(&sdp_pb);
2785  }
2786  }
2787 
2788 fail:
2789  av_freep(&avc);
2790 }
2791 
2793 {
2794  InputStream *ist = s->opaque;
2795  const enum AVPixelFormat *p;
2796  int ret;
2797 
2798  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2800  const AVCodecHWConfig *config = NULL;
2801  int i;
2802 
2803  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2804  break;
2805 
2806  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2807  ist->hwaccel_id == HWACCEL_AUTO) {
2808  for (i = 0;; i++) {
2809  config = avcodec_get_hw_config(s->codec, i);
2810  if (!config)
2811  break;
2812  if (!(config->methods &
2814  continue;
2815  if (config->pix_fmt == *p)
2816  break;
2817  }
2818  }
2819  if (config) {
2820  if (config->device_type != ist->hwaccel_device_type) {
2821  // Different hwaccel offered, ignore.
2822  continue;
2823  }
2824 
2825  ret = hwaccel_decode_init(s);
2826  if (ret < 0) {
2827  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2829  "%s hwaccel requested for input stream #%d:%d, "
2830  "but cannot be initialized.\n",
2832  ist->file_index, ist->st->index);
2833  return AV_PIX_FMT_NONE;
2834  }
2835  continue;
2836  }
2837  } else {
2838  const HWAccel *hwaccel = NULL;
2839  int i;
2840  for (i = 0; hwaccels[i].name; i++) {
2841  if (hwaccels[i].pix_fmt == *p) {
2842  hwaccel = &hwaccels[i];
2843  break;
2844  }
2845  }
2846  if (!hwaccel) {
2847  // No hwaccel supporting this pixfmt.
2848  continue;
2849  }
2850  if (hwaccel->id != ist->hwaccel_id) {
2851  // Does not match requested hwaccel.
2852  continue;
2853  }
2854 
2855  ret = hwaccel->init(s);
2856  if (ret < 0) {
2858  "%s hwaccel requested for input stream #%d:%d, "
2859  "but cannot be initialized.\n", hwaccel->name,
2860  ist->file_index, ist->st->index);
2861  return AV_PIX_FMT_NONE;
2862  }
2863  }
2864 
2865  if (ist->hw_frames_ctx) {
2867  if (!s->hw_frames_ctx)
2868  return AV_PIX_FMT_NONE;
2869  }
2870 
2871  ist->hwaccel_pix_fmt = *p;
2872  break;
2873  }
2874 
2875  return *p;
2876 }
2877 
2879 {
2880  InputStream *ist = s->opaque;
2881 
2882  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2883  return ist->hwaccel_get_buffer(s, frame, flags);
2884 
2885  return avcodec_default_get_buffer2(s, frame, flags);
2886 }
2887 
2888 static int init_input_stream(int ist_index, char *error, int error_len)
2889 {
2890  int ret;
2891  InputStream *ist = input_streams[ist_index];
2892 
2893  if (ist->decoding_needed) {
2894  AVCodec *codec = ist->dec;
2895  if (!codec) {
2896  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2897  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2898  return AVERROR(EINVAL);
2899  }
2900 
2901  ist->dec_ctx->opaque = ist;
2902  ist->dec_ctx->get_format = get_format;
2903  ist->dec_ctx->get_buffer2 = get_buffer;
2904  ist->dec_ctx->thread_safe_callbacks = 1;
2905 
2906  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2907  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2908  (ist->decoding_needed & DECODING_FOR_OST)) {
2909  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2911  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2912  }
2913 
2914  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2915 
2916  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2917  * audio, and video decoders such as cuvid or mediacodec */
2918  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2919 
2920  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2921  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2922  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2924  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2925 
2926  ret = hw_device_setup_for_decode(ist);
2927  if (ret < 0) {
2928  snprintf(error, error_len, "Device setup failed for "
2929  "decoder on input stream #%d:%d : %s",
2930  ist->file_index, ist->st->index, av_err2str(ret));
2931  return ret;
2932  }
2933 
2934  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2935  if (ret == AVERROR_EXPERIMENTAL)
2936  abort_codec_experimental(codec, 0);
2937 
2938  snprintf(error, error_len,
2939  "Error while opening decoder for input stream "
2940  "#%d:%d : %s",
2941  ist->file_index, ist->st->index, av_err2str(ret));
2942  return ret;
2943  }
2945  }
2946 
2947  ist->next_pts = AV_NOPTS_VALUE;
2948  ist->next_dts = AV_NOPTS_VALUE;
2949 
2950  return 0;
2951 }
2952 
2954 {
2955  if (ost->source_index >= 0)
2956  return input_streams[ost->source_index];
2957  return NULL;
2958 }
2959 
/* qsort()-style three-way comparator for int64_t values.
 * Returns -1, 0 or 1; computed via two relational tests so that the
 * subtraction cannot overflow (equivalent to FFDIFFSIGN). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2964 
2965 /* open the muxer when all the streams are initialized */
2966 static int check_init_output_file(OutputFile *of, int file_index)
2967 {
2968  int ret, i;
2969 
2970  for (i = 0; i < of->ctx->nb_streams; i++) {
2971  OutputStream *ost = output_streams[of->ost_index + i];
2972  if (!ost->initialized)
2973  return 0;
2974  }
2975 
2976  of->ctx->interrupt_callback = int_cb;
2977 
2978  ret = avformat_write_header(of->ctx, &of->opts);
2979  if (ret < 0) {
2981  "Could not write header for output file #%d "
2982  "(incorrect codec parameters ?): %s\n",
2983  file_index, av_err2str(ret));
2984  return ret;
2985  }
2986  //assert_avoptions(of->opts);
2987  of->header_written = 1;
2988 
2989  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2990 
2991  if (sdp_filename || want_sdp)
2992  print_sdp();
2993 
2994  /* flush the muxing queues */
2995  for (i = 0; i < of->ctx->nb_streams; i++) {
2996  OutputStream *ost = output_streams[of->ost_index + i];
2997 
2998  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2999  if (!av_fifo_size(ost->muxing_queue))
3000  ost->mux_timebase = ost->st->time_base;
3001 
3002  while (av_fifo_size(ost->muxing_queue)) {
3003  AVPacket pkt;
3004  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3005  write_packet(of, &pkt, ost, 1);
3006  }
3007  }
3008 
3009  return 0;
3010 }
3011 
3013 {
3014  AVBSFContext *ctx;
3015  int i, ret;
3016 
3017  if (!ost->nb_bitstream_filters)
3018  return 0;
3019 
3020  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3021  ctx = ost->bsf_ctx[i];
3022 
3023  ret = avcodec_parameters_copy(ctx->par_in,
3024  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3025  if (ret < 0)
3026  return ret;
3027 
3028  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3029 
3030  ret = av_bsf_init(ctx);
3031  if (ret < 0) {
3032  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3033  ost->bsf_ctx[i]->filter->name);
3034  return ret;
3035  }
3036  }
3037 
3038  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3039  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3040  if (ret < 0)
3041  return ret;
3042 
3043  ost->st->time_base = ctx->time_base_out;
3044 
3045  return 0;
3046 }
3047 
3049 {
3050  OutputFile *of = output_files[ost->file_index];
3051  InputStream *ist = get_input_stream(ost);
3052  AVCodecParameters *par_dst = ost->st->codecpar;
3053  AVCodecParameters *par_src = ost->ref_par;
3054  AVRational sar;
3055  int i, ret;
3056  uint32_t codec_tag = par_dst->codec_tag;
3057 
3058  av_assert0(ist && !ost->filter);
3059 
3060  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3061  if (ret >= 0)
3062  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3063  if (ret < 0) {
3065  "Error setting up codec context options.\n");
3066  return ret;
3067  }
3069 
3070  if (!codec_tag) {
3071  unsigned int codec_tag_tmp;
3072  if (!of->ctx->oformat->codec_tag ||
3073  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3074  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3075  codec_tag = par_src->codec_tag;
3076  }
3077 
3078  ret = avcodec_parameters_copy(par_dst, par_src);
3079  if (ret < 0)
3080  return ret;
3081 
3082  par_dst->codec_tag = codec_tag;
3083 
3084  if (!ost->frame_rate.num)
3085  ost->frame_rate = ist->framerate;
3086  ost->st->avg_frame_rate = ost->frame_rate;
3087 
3089  if (ret < 0)
3090  return ret;
3091 
3092  // copy timebase while removing common factors
3093  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3095 
3096  // copy estimated duration as a hint to the muxer
3097  if (ost->st->duration <= 0 && ist->st->duration > 0)
3098  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3099 
3100  // copy disposition
3101  ost->st->disposition = ist->st->disposition;
3102 
3103  if (ist->st->nb_side_data) {
3104  for (i = 0; i < ist->st->nb_side_data; i++) {
3105  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3106  uint8_t *dst_data;
3107 
3108  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3109  if (!dst_data)
3110  return AVERROR(ENOMEM);
3111  memcpy(dst_data, sd_src->data, sd_src->size);
3112  }
3113  }
3114 
3115  if (ost->rotate_overridden) {
3117  sizeof(int32_t) * 9);
3118  if (sd)
3119  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3120  }
3121 
3122  ost->parser = av_parser_init(par_dst->codec_id);
3123  ost->parser_avctx = avcodec_alloc_context3(NULL);
3124  if (!ost->parser_avctx)
3125  return AVERROR(ENOMEM);
3126 
3127  switch (par_dst->codec_type) {
3128  case AVMEDIA_TYPE_AUDIO:
3129  if (audio_volume != 256) {
3130  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3131  exit_program(1);
3132  }
3133  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3134  par_dst->block_align= 0;
3135  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3136  par_dst->block_align= 0;
3137  break;
3138  case AVMEDIA_TYPE_VIDEO:
3139  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3140  sar =
3141  av_mul_q(ost->frame_aspect_ratio,
3142  (AVRational){ par_dst->height, par_dst->width });
3143  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3144  "with stream copy may produce invalid files\n");
3145  }
3146  else if (ist->st->sample_aspect_ratio.num)
3147  sar = ist->st->sample_aspect_ratio;
3148  else
3149  sar = par_src->sample_aspect_ratio;
3150  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3151  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3152  ost->st->r_frame_rate = ist->st->r_frame_rate;
3153  break;
3154  }
3155 
3156  ost->mux_timebase = ist->st->time_base;
3157 
3158  return 0;
3159 }
3160 
/* Tag the output stream's metadata with the name of the encoder used
 * (e.g. "Lavc... libx264"), unless the user already supplied one or
 * bitexact output was requested.
 * NOTE(review): the signature line (3161) is elided in this listing;
 * presumably set_encoder_id(OutputFile *of, OutputStream *ost) -- confirm. */
3162 {
3163  AVDictionaryEntry *e;
3164 
3165  uint8_t *encoder_string;
3166  int encoder_string_len;
3167  int format_flags = 0;
3168  int codec_flags = ost->enc_ctx->flags;
3169 
 /* A user-provided "encoder" tag always wins. */
3170  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3171  return;
3172 
 /* Evaluate user-supplied muxer "fflags" so +bitexact is honored. */
3173  e = av_dict_get(of->opts, "fflags", NULL, 0);
3174  if (e) {
3175  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3176  if (!o)
3177  return;
3178  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3179  }
 /* Same for user-supplied codec "flags". */
3180  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3181  if (e) {
3182  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3183  if (!o)
3184  return;
3185  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3186  }
3187 
 /* "LIBAVCODEC_IDENT <enc name>" plus separator and terminator. */
3188  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3189  encoder_string = av_mallocz(encoder_string_len);
3190  if (!encoder_string)
3191  exit_program(1);
3192 
 /* In bitexact mode emit only "Lavc" (no version) to keep output reproducible. */
3193  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3194  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3195  else
3196  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3197  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
 /* NOTE(review): line 3199 (av_dict_set flags argument) is elided here. */
3198  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3200 }
3201 
/* Parse the -force_key_frames value: a comma-separated list of timestamps,
 * where an entry starting with "chapters" expands to the output file's
 * chapter start times (with an optional offset after the keyword).
 * Fills ost->forced_kf_pts (sorted, in avctx->time_base) and
 * ost->forced_kf_count.  kf is modified in place (commas are overwritten). */
3202 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3203  AVCodecContext *avctx)
3204 {
3205  char *p;
3206  int n = 1, i, size, index = 0;
3207  int64_t t, *pts;
3208 
 /* Count entries: one more than the number of commas. */
3209  for (p = kf; *p; p++)
3210  if (*p == ',')
3211  n++;
3212  size = n;
3213  pts = av_malloc_array(size, sizeof(*pts));
3214  if (!pts) {
3215  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3216  exit_program(1);
3217  }
3218 
3219  p = kf;
3220  for (i = 0; i < n; i++) {
3221  char *next = strchr(p, ',');
3222 
3223  if (next)
3224  *next++ = 0;
3225 
3226  if (!memcmp(p, "chapters", 8)) {
3227 
3228  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3229  int j;
3230 
 /* Grow the array by one slot per chapter (the "chapters" entry
  * itself is replaced, hence nb_chapters - 1 extra).
  * NOTE(review): line 3234 (av_log level argument) is elided here. */
3231  if (avf->nb_chapters > INT_MAX - size ||
3232  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3233  sizeof(*pts)))) {
3235  "Could not allocate forced key frames array.\n");
3236  exit_program(1);
3237  }
 /* Optional offset directly after the "chapters" keyword. */
3238  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3239  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3240 
3241  for (j = 0; j < avf->nb_chapters; j++) {
3242  AVChapter *c = avf->chapters[j];
3243  av_assert1(index < size);
3244  pts[index++] = av_rescale_q(c->start, c->time_base,
3245  avctx->time_base) + t;
3246  }
3247 
3248  } else {
3249 
 /* Plain timestamp entry. */
3250  t = parse_time_or_die("force_key_frames", p, 1);
3251  av_assert1(index < size);
3252  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3253 
3254  }
3255 
3256  p = next;
3257  }
3258 
3259  av_assert0(index == size);
 /* Keep timestamps ordered so the encoder loop can scan them linearly. */
3260  qsort(pts, size, sizeof(*pts), compare_int64);
3261  ost->forced_kf_count = size;
3262  ost->forced_kf_pts = pts;
3263 }
3264 
3265 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3266 {
3267  InputStream *ist = get_input_stream(ost);
3268  AVCodecContext *enc_ctx = ost->enc_ctx;
3269  AVFormatContext *oc;
3270 
3271  if (ost->enc_timebase.num > 0) {
3272  enc_ctx->time_base = ost->enc_timebase;
3273  return;
3274  }
3275 
3276  if (ost->enc_timebase.num < 0) {
3277  if (ist) {
3278  enc_ctx->time_base = ist->st->time_base;
3279  return;
3280  }
3281 
3282  oc = output_files[ost->file_index]->ctx;
3283  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3284  }
3285 
3286  enc_ctx->time_base = default_time_base;
3287 }
3288 
/* Configure the encoder context of an output stream that will be transcoded:
 * frame rate selection, per-media-type parameters (sample rate/format,
 * dimensions, pixel format, SAR), forced-keyframe setup, and the stream's
 * mux time base.  Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the signature line (3289) and several interior lines are
 * elided in this listing (e.g. 3293, 3318-3319, 3324, 3331, 3352,
 * 3356-3358, 3364, 3367, 3369, 3375, 3383, 3398, 3403-3406, 3410-3413,
 * 3418) -- consult the full source before modifying. */
3290 {
3291  InputStream *ist = get_input_stream(ost);
3292  AVCodecContext *enc_ctx = ost->enc_ctx;
3294  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3295  int j, ret;
3296 
3297  set_encoder_id(output_files[ost->file_index], ost);
3298 
3299  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3300  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3301  // which have to be filtered out to prevent leaking them to output files.
3302  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3303 
3304  if (ist) {
3305  ost->st->disposition = ist->st->disposition;
3306 
3307  dec_ctx = ist->dec_ctx;
3308 
3309  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3310  } else {
 /* No direct input stream: look for a sibling stream of the same type. */
3311  for (j = 0; j < oc->nb_streams; j++) {
3312  AVStream *st = oc->streams[j];
3313  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3314  break;
3315  }
3316  if (j == oc->nb_streams)
3317  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3320  }
3321 
3322  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 /* Frame rate fallbacks, in decreasing priority. */
3323  if (!ost->frame_rate.num)
3325  if (ist && !ost->frame_rate.num)
3326  ost->frame_rate = ist->framerate;
3327  if (ist && !ost->frame_rate.num)
3328  ost->frame_rate = ist->st->r_frame_rate;
3329  if (ist && !ost->frame_rate.num) {
3330  ost->frame_rate = (AVRational){25, 1};
3332  "No information "
3333  "about the input framerate is available. Falling "
3334  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3335  "if you want a different framerate.\n",
3336  ost->file_index, ost->index);
3337  }
3338 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
 /* Snap to the nearest encoder-supported rate unless -force_fps was given. */
3339  if (ost->enc->supported_framerates && !ost->force_fps) {
3340  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3341  ost->frame_rate = ost->enc->supported_framerates[idx];
3342  }
3343  // reduce frame rate for mpeg4 to be within the spec limits
3344  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3345  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3346  ost->frame_rate.num, ost->frame_rate.den, 65535);
3347  }
3348  }
3349 
3350  switch (enc_ctx->codec_type) {
3351  case AVMEDIA_TYPE_AUDIO:
3353  if (dec_ctx)
3354  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3355  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3359 
3360  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3361  break;
3362 
3363  case AVMEDIA_TYPE_VIDEO:
3365 
3366  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3368  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3370  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3371  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3372  }
 /* Rescale forced keyframe times into the encoder time base. */
3373  for (j = 0; j < ost->forced_kf_count; j++)
3374  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3376  enc_ctx->time_base);
3377 
 /* Geometry and pixel format come from the filter graph sink. */
3378  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3379  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3380  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3381  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3382  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3384 
3385  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3386  if (dec_ctx)
3387  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3388  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3389 
3390  enc_ctx->framerate = ost->frame_rate;
3391 
3392  ost->st->avg_frame_rate = ost->frame_rate;
3393 
3394  if (!dec_ctx ||
3395  enc_ctx->width != dec_ctx->width ||
3396  enc_ctx->height != dec_ctx->height ||
3397  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3399  }
3400 
3401  if (ost->forced_keyframes) {
 /* "expr:" prefix: compile a keyframe expression. */
3402  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3405  if (ret < 0) {
3407  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3408  return ret;
3409  }
3414 
3415  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416  // parse it only for static kf timings
3417  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3419  }
3420  }
3421  break;
3422  case AVMEDIA_TYPE_SUBTITLE:
3423  enc_ctx->time_base = AV_TIME_BASE_Q;
 /* Subtitle rendering needs a canvas size; borrow the input's. */
3424  if (!enc_ctx->width) {
3425  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3427  }
3428  break;
3429  case AVMEDIA_TYPE_DATA:
3430  break;
3431  default:
3432  abort();
3433  break;
3434  }
3435 
3436  ost->mux_timebase = enc_ctx->time_base;
3437 
3438  return 0;
3439 }
3440 
/* Fully initialize one output stream: set up and open the encoder (or run the
 * streamcopy path), copy side data, apply user-requested dispositions, set up
 * bitstream filters, and finally try to write the output file header.
 * On failure a human-readable message is written into 'error' (error_len
 * bytes) and a negative AVERROR code is returned.
 * NOTE(review): several lines are elided in this listing (e.g. 3458, 3462,
 * 3473-3475, 3499, 3501, 3506, 3508) -- consult the full source before
 * modifying. */
3441 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3442 {
3443  int ret = 0;
3444 
3445  if (ost->encoding_needed) {
3446  AVCodec *codec = ost->enc;
3447  AVCodecContext *dec = NULL;
3448  InputStream *ist;
3449 
3450  ret = init_output_stream_encode(ost);
3451  if (ret < 0)
3452  return ret;
3453 
3454  if ((ist = get_input_stream(ost)))
3455  dec = ist->dec_ctx;
3456  if (dec && dec->subtitle_header) {
3457  /* ASS code assumes this buffer is null terminated so add extra byte. */
3459  if (!ost->enc_ctx->subtitle_header)
3460  return AVERROR(ENOMEM);
3461  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3463  }
 /* Default to automatic threading unless the user chose otherwise. */
3464  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3465  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* Audio encoders without their own defaults get 128 kb/s. */
3466  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3467  !codec->defaults &&
3468  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3469  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3470  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3471 
 /* Hardware frames context from the filter sink, or device setup. */
3472  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3476  if (!ost->enc_ctx->hw_frames_ctx)
3477  return AVERROR(ENOMEM);
3478  } else {
3479  ret = hw_device_setup_for_encode(ost);
3480  if (ret < 0) {
3481  snprintf(error, error_len, "Device setup failed for "
3482  "encoder on output stream #%d:%d : %s",
3483  ost->file_index, ost->index, av_err2str(ret));
3484  return ret;
3485  }
3486  }
3487 
3488  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3489  if (ret == AVERROR_EXPERIMENTAL)
3490  abort_codec_experimental(codec, 1);
3491  snprintf(error, error_len,
3492  "Error while opening encoder for output stream #%d:%d - "
3493  "maybe incorrect parameters such as bit_rate, rate, width or height",
3494  ost->file_index, ost->index);
3495  return ret;
3496  }
3497  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3498  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3500  ost->enc_ctx->frame_size);
3502  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3503  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3504  " It takes bits/s as argument, not kbits/s\n");
3505 
3507  if (ret < 0) {
3509  "Error initializing the output stream codec context.\n");
3510  exit_program(1);
3511  }
3512  /*
3513  * FIXME: ost->st->codec should't be needed here anymore.
3514  */
3515  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3516  if (ret < 0)
3517  return ret;
3518 
 /* Propagate coded side data produced by the encoder to the stream. */
3519  if (ost->enc_ctx->nb_coded_side_data) {
3520  int i;
3521 
3522  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3523  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3524  uint8_t *dst_data;
3525 
3526  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3527  if (!dst_data)
3528  return AVERROR(ENOMEM);
3529  memcpy(dst_data, sd_src->data, sd_src->size);
3530  }
3531  }
3532 
3533  /*
3534  * Add global input side data. For now this is naive, and copies it
3535  * from the input stream's global side data. All side data should
3536  * really be funneled over AVFrame and libavfilter, then added back to
3537  * packet side data, and then potentially using the first packet for
3538  * global side data.
3539  */
3540  if (ist) {
3541  int i;
3542  for (i = 0; i < ist->st->nb_side_data; i++) {
3543  AVPacketSideData *sd = &ist->st->side_data[i];
3544  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3545  if (!dst)
3546  return AVERROR(ENOMEM);
3547  memcpy(dst, sd->data, sd->size);
 /* Autorotation is applied by the filter chain, so zero out the matrix. */
3548  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3549  av_display_rotation_set((uint32_t *)dst, 0);
3550  }
3551  }
3552 
3553  // copy timebase while removing common factors
3554  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3555  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3556 
3557  // copy estimated duration as a hint to the muxer
3558  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3559  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3560 
3561  ost->st->codec->codec= ost->enc_ctx->codec;
3562  } else if (ost->stream_copy) {
3563  ret = init_output_stream_streamcopy(ost);
3564  if (ret < 0)
3565  return ret;
3566 
3567  /*
3568  * FIXME: will the codec context used by the parser during streamcopy
3569  * This should go away with the new parser API.
3570  */
3571  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3572  if (ret < 0)
3573  return ret;
3574  }
3575 
3576  // parse user provided disposition, and update stream values
3577  if (ost->disposition) {
3578  static const AVOption opts[] = {
3579  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3580  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3581  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3582  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3583  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3584  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3585  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3586  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3587  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3588  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3589  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3590  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3591  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3592  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3593  { NULL },
3594  };
3595  static const AVClass class = {
3596  .class_name = "",
3597  .item_name = av_default_item_name,
3598  .option = opts,
3599  .version = LIBAVUTIL_VERSION_INT,
3600  };
3601  const AVClass *pclass = &class;
3602 
3603  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3604  if (ret < 0)
3605  return ret;
3606  }
3607 
3608  /* initialize bitstream filters for the output stream
3609  * needs to be done here, because the codec id for streamcopy is not
3610  * known until now */
3611  ret = init_output_bsfs(ost);
3612  if (ret < 0)
3613  return ret;
3614 
3615  ost->initialized = 1;
3616 
 /* Header is written once all streams of the file are initialized. */
3617  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3618  if (ret < 0)
3619  return ret;
3620 
3621  return ret;
3622 }
3623 
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after the initial probe of the input file.
 * NOTE(review): line 3633 (the media-type string argument) is elided here. */
3624 static void report_new_stream(int input_index, AVPacket *pkt)
3625 {
3626  InputFile *file = input_files[input_index];
3627  AVStream *st = file->ctx->streams[pkt->stream_index];
3628 
 /* Already warned about this stream index. */
3629  if (pkt->stream_index < file->nb_streams_warn)
3630  return;
3631  av_log(file->ctx, AV_LOG_WARNING,
3632  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3634  input_index, pkt->stream_index,
3635  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
 /* Remember the highest index we warned about. */
3636  file->nb_streams_warn = pkt->stream_index + 1;
3637 }
3638 
/* One-time setup before the main transcode loop: resolve filtergraph-fed
 * output streams' source indices, init framerate emulation, open decoders
 * and (non-filtered) encoders, discard unused programs, write headers for
 * streamless files, and print the stream mapping.  Returns 0 on success or
 * a negative error code (after printing 'error').
 * NOTE(review): line 3806 is elided near the end of this listing. */
3639 static int transcode_init(void)
3640 {
3641  int ret = 0, i, j, k;
3642  AVFormatContext *oc;
3643  OutputStream *ost;
3644  InputStream *ist;
3645  char error[1024] = {0};
3646 
 /* Give filtergraph outputs a best-effort source_index for reporting. */
3647  for (i = 0; i < nb_filtergraphs; i++) {
3648  FilterGraph *fg = filtergraphs[i];
3649  for (j = 0; j < fg->nb_outputs; j++) {
3650  OutputFilter *ofilter = fg->outputs[j];
3651  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3652  continue;
3653  if (fg->nb_inputs != 1)
3654  continue;
3655  for (k = nb_input_streams-1; k >= 0 ; k--)
3656  if (fg->inputs[0]->ist == input_streams[k])
3657  break;
3658  ofilter->ost->source_index = k;
3659  }
3660  }
3661 
3662  /* init framerate emulation */
3663  for (i = 0; i < nb_input_files; i++) {
3664  InputFile *ifile = input_files[i];
3665  if (ifile->rate_emu)
3666  for (j = 0; j < ifile->nb_streams; j++)
3667  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3668  }
3669 
3670  /* init input streams */
3671  for (i = 0; i < nb_input_streams; i++)
3672  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* Tear down any encoders opened so far before bailing out. */
3673  for (i = 0; i < nb_output_streams; i++) {
3674  ost = output_streams[i];
3675  avcodec_close(ost->enc_ctx);
3676  }
3677  goto dump_format;
3678  }
3679 
3680  /* open each encoder */
3681  for (i = 0; i < nb_output_streams; i++) {
3682  // skip streams fed from filtergraphs until we have a frame for them
3683  if (output_streams[i]->filter)
3684  continue;
3685 
3686  ret = init_output_stream(output_streams[i], error, sizeof(error));
3687  if (ret < 0)
3688  goto dump_format;
3689  }
3690 
3691  /* discard unused programs */
3692  for (i = 0; i < nb_input_files; i++) {
3693  InputFile *ifile = input_files[i];
3694  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3695  AVProgram *p = ifile->ctx->programs[j];
3696  int discard = AVDISCARD_ALL;
3697 
 /* Keep the program if any of its streams is in use. */
3698  for (k = 0; k < p->nb_stream_indexes; k++)
3699  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3700  discard = AVDISCARD_DEFAULT;
3701  break;
3702  }
3703  p->discard = discard;
3704  }
3705  }
3706 
3707  /* write headers for files with no streams */
3708  for (i = 0; i < nb_output_files; i++) {
3709  oc = output_files[i]->ctx;
3710  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3711  ret = check_init_output_file(output_files[i], i);
3712  if (ret < 0)
3713  goto dump_format;
3714  }
3715  }
3716 
3717  dump_format:
3718  /* dump the stream mapping */
3719  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3720  for (i = 0; i < nb_input_streams; i++) {
3721  ist = input_streams[i];
3722 
3723  for (j = 0; j < ist->nb_filters; j++) {
3724  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3725  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3726  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3727  ist->filters[j]->name);
3728  if (nb_filtergraphs > 1)
3729  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3730  av_log(NULL, AV_LOG_INFO, "\n");
3731  }
3732  }
3733  }
3734 
3735  for (i = 0; i < nb_output_streams; i++) {
3736  ost = output_streams[i];
3737 
3738  if (ost->attachment_filename) {
3739  /* an attached file */
3740  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3741  ost->attachment_filename, ost->file_index, ost->index);
3742  continue;
3743  }
3744 
3745  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3746  /* output from a complex graph */
3747  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3748  if (nb_filtergraphs > 1)
3749  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3750 
3751  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3752  ost->index, ost->enc ? ost->enc->name : "?");
3753  continue;
3754  }
3755 
3756  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3757  input_streams[ost->source_index]->file_index,
3758  input_streams[ost->source_index]->st->index,
3759  ost->file_index,
3760  ost->index);
3761  if (ost->sync_ist != input_streams[ost->source_index])
3762  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3763  ost->sync_ist->file_index,
3764  ost->sync_ist->st->index);
3765  if (ost->stream_copy)
3766  av_log(NULL, AV_LOG_INFO, " (copy)");
3767  else {
 /* Print "(codec (decoder) -> codec (encoder))" with "native" when the
  * implementation name matches the codec name. */
3768  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3769  const AVCodec *out_codec = ost->enc;
3770  const char *decoder_name = "?";
3771  const char *in_codec_name = "?";
3772  const char *encoder_name = "?";
3773  const char *out_codec_name = "?";
3774  const AVCodecDescriptor *desc;
3775 
3776  if (in_codec) {
3777  decoder_name = in_codec->name;
3778  desc = avcodec_descriptor_get(in_codec->id);
3779  if (desc)
3780  in_codec_name = desc->name;
3781  if (!strcmp(decoder_name, in_codec_name))
3782  decoder_name = "native";
3783  }
3784 
3785  if (out_codec) {
3786  encoder_name = out_codec->name;
3787  desc = avcodec_descriptor_get(out_codec->id);
3788  if (desc)
3789  out_codec_name = desc->name;
3790  if (!strcmp(encoder_name, out_codec_name))
3791  encoder_name = "native";
3792  }
3793 
3794  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3795  in_codec_name, decoder_name,
3796  out_codec_name, encoder_name);
3797  }
3798  av_log(NULL, AV_LOG_INFO, "\n");
3799  }
3800 
3801  if (ret) {
3802  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3803  return ret;
3804  }
3805 
3807 
3808  return 0;
3809 }
3810 
3811 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3812 static int need_output(void)
3813 {
3814  int i;
3815 
3816  for (i = 0; i < nb_output_streams; i++) {
3817  OutputStream *ost = output_streams[i];
3818  OutputFile *of = output_files[ost->file_index];
3819  AVFormatContext *os = output_files[ost->file_index]->ctx;
3820 
3821  if (ost->finished ||
3822  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3823  continue;
3824  if (ost->frame_number >= ost->max_frames) {
3825  int j;
3826  for (j = 0; j < of->ctx->nb_streams; j++)
3827  close_output_stream(output_streams[of->ost_index + j]);
3828  continue;
3829  }
3830 
3831  return 1;
3832  }
3833 
3834  return 0;
3835 }
3836 
3837 /**
3838  * Select the output stream to process.
3839  *
3840  * @return selected output stream, or NULL if none available
3841  */
/* Pick the next output stream to work on: an uninitialized stream first,
 * otherwise the unfinished, available stream with the smallest muxed DTS
 * (rescaled to AV_TIME_BASE_Q); returns NULL when nothing is available.
 * NOTE(review): the signature line (3842) is elided; presumably
 * static OutputStream *choose_output(void) -- confirm. */
3843 {
3844  int i;
3845  int64_t opts_min = INT64_MAX;
3846  OutputStream *ost_min = NULL;
3847 
3848  for (i = 0; i < nb_output_streams; i++) {
3849  OutputStream *ost = output_streams[i];
 /* Streams with no DTS yet sort first (INT64_MIN). */
3850  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3851  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3852  AV_TIME_BASE_Q);
3853  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3854  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3855 
 /* Uninitialized streams take absolute priority. */
3856  if (!ost->initialized && !ost->inputs_done)
3857  return ost;
3858 
3859  if (!ost->finished && opts < opts_min) {
3860  opts_min = opts;
 /* Track the minimum even for unavailable streams, but return NULL then. */
3861  ost_min = ost->unavailable ? NULL : ost;
3862  }
3863  }
3864  return ost_min;
3865 }
3866 
/* Toggle terminal echo on stdin (no-op when termios is unavailable). */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term);
#endif
}
3878 
/* Poll the keyboard (at most every 100ms) and act on interactive commands:
 * 'q' quit, '+'/'-' verbosity, 's' QP histogram, 'h' packet/hex dump modes,
 * 'c'/'C' send or queue a filtergraph command, 'd'/'D' debug flags, '?' help.
 * Returns AVERROR_EXIT on quit/signal, 0 otherwise.
 * NOTE(review): lines 3903 and 3940 are elided in this listing. */
3879 static int check_keyboard_interaction(int64_t cur_time)
3880 {
3881  int i, ret, key;
3882  static int64_t last_time;
3883  if (received_nb_signals)
3884  return AVERROR_EXIT;
3885  /* read_key() returns 0 on EOF */
3886  if(cur_time - last_time >= 100000 && !run_as_daemon){
3887  key = read_key();
3888  last_time = cur_time;
3889  }else
3890  key = -1;
3891  if (key == 'q')
3892  return AVERROR_EXIT;
3893  if (key == '+') av_log_set_level(av_log_get_level()+10);
3894  if (key == '-') av_log_set_level(av_log_get_level()-10);
3895  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off. */
3896  if (key == 'h'){
3897  if (do_hex_dump){
3898  do_hex_dump = do_pkt_dump = 0;
3899  } else if(do_pkt_dump){
3900  do_hex_dump = 1;
3901  } else
3902  do_pkt_dump = 1;
3904  }
3905  if (key == 'c' || key == 'C'){
3906  char buf[4096], target[64], command[256], arg[256] = {0};
3907  double time;
3908  int k, n = 0;
3909  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3910  i = 0;
 /* Read a line from the tty with echo enabled. */
3911  set_tty_echo(1);
3912  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3913  if (k > 0)
3914  buf[i++] = k;
3915  buf[i] = 0;
3916  set_tty_echo(0);
3917  fprintf(stderr, "\n");
3918  if (k > 0 &&
3919  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3920  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3921  target, time, command, arg);
3922  for (i = 0; i < nb_filtergraphs; i++) {
3923  FilterGraph *fg = filtergraphs[i];
3924  if (fg->graph) {
 /* time < 0: send immediately; otherwise queue for later ('C' only). */
3925  if (time < 0) {
3926  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3927  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3928  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3929  } else if (key == 'c') {
3930  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3931  ret = AVERROR_PATCHWELCOME;
3932  } else {
3933  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3934  if (ret < 0)
3935  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3936  }
3937  }
3938  }
3939  } else {
3941  "Parse error, at least 3 arguments were expected, "
3942  "only %d given in string '%s'\n", n, buf);
3943  }
3944  }
3945  if (key == 'd' || key == 'D'){
3946  int debug=0;
 /* 'D' cycles to the next debug bit; 'd' reads a value interactively. */
3947  if(key == 'D') {
3948  debug = input_streams[0]->st->codec->debug<<1;
3949  if(!debug) debug = 1;
3950  while(debug & (FF_DEBUG_DCT_COEFF
3951 #if FF_API_DEBUG_MV
3952  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3953 #endif
3954  )) //unsupported, would just crash
3955  debug += debug;
3956  }else{
3957  char buf[32];
3958  int k = 0;
3959  i = 0;
3960  set_tty_echo(1);
3961  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3962  if (k > 0)
3963  buf[i++] = k;
3964  buf[i] = 0;
3965  set_tty_echo(0);
3966  fprintf(stderr, "\n");
3967  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3968  fprintf(stderr,"error parsing debug value\n");
3969  }
 /* Apply the chosen debug flags to all decoders and encoders. */
3970  for(i=0;i<nb_input_streams;i++) {
3971  input_streams[i]->st->codec->debug = debug;
3972  }
3973  for(i=0;i<nb_output_streams;i++) {
3974  OutputStream *ost = output_streams[i];
3975  ost->enc_ctx->debug = debug;
3976  }
3977  if(debug) av_log_set_level(AV_LOG_DEBUG);
3978  fprintf(stderr,"debug=%d\n", debug);
3979  }
3980  if (key == '?'){
3981  fprintf(stderr, "key function\n"
3982  "? show this help\n"
3983  "+ increase verbosity\n"
3984  "- decrease verbosity\n"
3985  "c Send command to first matching filter supporting it\n"
3986  "C Send/Queue command to all matching filters\n"
3987  "D cycle through available debug modes\n"
3988  "h dump packets/hex press to cycle through the 3 states\n"
3989  "q quit\n"
3990  "s Show QP histogram\n"
3991  );
3992  }
3993  return 0;
3994 }
3995 
3996 #if HAVE_THREADS
/* Per-input-file reader thread: reads packets from the demuxer and pushes
 * them onto the file's thread message queue until EOF or error.  For
 * non-blocking queues, a full queue falls back to one blocking send and
 * logs a hint to raise -thread_queue_size.
 * NOTE(review): line 4019 (the av_log call head) is elided here. */
3997 static void *input_thread(void *arg)
3998 {
3999  InputFile *f = arg;
4000  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4001  int ret = 0;
4002 
4003  while (1) {
4004  AVPacket pkt;
4005  ret = av_read_frame(f->ctx, &pkt);
4006 
4007  if (ret == AVERROR(EAGAIN)) {
4008  av_usleep(10000);
4009  continue;
4010  }
 /* Demux error or EOF: propagate to the consumer side of the queue. */
4011  if (ret < 0) {
4012  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4013  break;
4014  }
4015  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* Queue full in non-blocking mode: retry once, blocking. */
4016  if (flags && ret == AVERROR(EAGAIN)) {
4017  flags = 0;
4018  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4020  "Thread message queue blocking; consider raising the "
4021  "thread_queue_size option (current value: %d)\n",
4022  f->thread_queue_size);
4023  }
4024  if (ret < 0) {
4025  if (ret != AVERROR_EOF)
4026  av_log(f->ctx, AV_LOG_ERROR,
4027  "Unable to send packet to main thread: %s\n",
4028  av_err2str(ret));
4029  av_packet_unref(&pkt);
4030  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4031  break;
4032  }
4033  }
4034 
4035  return NULL;
4036 }
4037 
/* Stop and join every input reader thread: drain its queue so the thread
 * can exit, join it, then free the queue.
 * NOTE(review): line 4048 is elided here (presumably the call that signals
 * the sending side to stop -- confirm in the full source). */
4038 static void free_input_threads(void)
4039 {
4040  int i;
4041 
4042  for (i = 0; i < nb_input_files; i++) {
4043  InputFile *f = input_files[i];
4044  AVPacket pkt;
4045 
4046  if (!f || !f->in_thread_queue)
4047  continue;
 /* Drain any queued packets so the reader thread unblocks. */
4049  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4050  av_packet_unref(&pkt);
4051 
4052  pthread_join(f->thread, NULL);
4053  f->joined = 1;
4054  av_thread_message_queue_free(&f->in_thread_queue);
4055  }
4056 }
4057 
4058 static int init_input_threads(void)
4059 {
4060  int i, ret;
4061 
4062  if (nb_input_files == 1)
4063  return 0;
4064 
4065  for (i = 0; i < nb_input_files; i++) {
4066  InputFile *f = input_files[i];
4067 
4068  if (f->ctx->pb ? !f->ctx->pb->seekable :
4069  strcmp(f->ctx->iformat->name, "lavfi"))
4070  f->non_blocking = 1;
4071  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4072  f->thread_queue_size, sizeof(AVPacket));
4073  if (ret < 0)
4074  return ret;
4075 
4076  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4077  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4078  av_thread_message_queue_free(&f->in_thread_queue);
4079  return AVERROR(ret);
4080  }
4081  }
4082  return 0;
4083 }
4084 
/* Receive one packet from the input file's reader-thread queue, in
 * non-blocking mode when the file was flagged as such.
 * NOTE(review): line 4089 (the flag expression's second half) is elided. */
4085 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4086 {
4087  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4088  f->non_blocking ?
4090 }
4091 #endif
4092 
/* Fetch the next packet from an input file, honoring -re (rate emulation)
 * by returning EAGAIN until wall-clock time catches up with stream DTS;
 * uses the reader-thread queue when more than one input file is open.
 * NOTE(review): the signature line (4093) is elided; presumably
 * static int get_input_packet(InputFile *f, AVPacket *pkt) -- confirm. */
4094 {
4095  if (f->rate_emu) {
4096  int i;
4097  for (i = 0; i < f->nb_streams; i++) {
4098  InputStream *ist = input_streams[f->ist_index + i];
 /* Compare stream DTS (scaled to microseconds) against elapsed time. */
4099  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4100  int64_t now = av_gettime_relative() - ist->start;
4101  if (pts > now)
4102  return AVERROR(EAGAIN);
4103  }
4104  }
4105 
4106 #if HAVE_THREADS
4107  if (nb_input_files > 1)
4108  return get_input_packet_mt(f, pkt);
4109 #endif
4110  return av_read_frame(f->ctx, pkt);
4111 }
4112 
4113 static int got_eagain(void)
4114 {
4115  int i;
4116  for (i = 0; i < nb_output_streams; i++)
4117  if (output_streams[i]->unavailable)
4118  return 1;
4119  return 0;
4120 }
4121 
4122 static void reset_eagain(void)
4123 {
4124  int i;
4125  for (i = 0; i < nb_input_files; i++)
4126  input_files[i]->eagain = 0;
4127  for (i = 0; i < nb_output_streams; i++)
4128  output_streams[i]->unavailable = 0;
4129 }
4130 
4131 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4132 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4133  AVRational time_base)
4134 {
4135  int ret;
4136 
4137  if (!*duration) {
4138  *duration = tmp;
4139  return tmp_time_base;
4140  }
4141 
4142  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4143  if (ret < 0) {
4144  *duration = tmp;
4145  return tmp_time_base;
4146  }
4147 
4148  return time_base;
4149 }
4150 
/* Rewind an input file for -stream_loop: seek back to the start, flush all
 * decoders, and extend the file's accumulated duration by the length of the
 * just-played iteration (audio sample counts when audio is present,
 * otherwise a frame-rate-derived estimate).  Decrements the loop counter.
 * NOTE(review): the signature line (4151) is elided; presumably
 * static int seek_to_start(InputFile *ifile, AVFormatContext *is). */
4152 {
4153  InputStream *ist;
4154  AVCodecContext *avctx;
4155  int i, ret, has_audio = 0;
4156  int64_t duration = 0;
4157 
4158  ret = av_seek_frame(is, -1, is->start_time, 0);
4159  if (ret < 0)
4160  return ret;
4161 
4162  for (i = 0; i < ifile->nb_streams; i++) {
4163  ist = input_streams[ifile->ist_index + i];
4164  avctx = ist->dec_ctx;
4165 
4166  // flush decoders
4167  if (ist->decoding_needed) {
4168  process_input_packet(ist, NULL, 1);
4169  avcodec_flush_buffers(avctx);
4170  }
4171 
4172  /* duration is the length of the last frame in a stream
4173  * when audio stream is present we don't care about
4174  * last video frame length because it's not defined exactly */
4175  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4176  has_audio = 1;
4177  }
4178 
4179  for (i = 0; i < ifile->nb_streams; i++) {
4180  ist = input_streams[ifile->ist_index + i];
4181  avctx = ist->dec_ctx;
4182 
4183  if (has_audio) {
 /* Audio present: last-frame length comes from decoded sample count. */
4184  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4185  AVRational sample_rate = {1, avctx->sample_rate};
4186 
4187  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4188  } else {
4189  continue;
4190  }
4191  } else {
 /* No audio: estimate one frame's duration from the frame rate. */
4192  if (ist->framerate.num) {
4193  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4194  } else if (ist->st->avg_frame_rate.num) {
4195  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4196  } else {
4197  duration = 1;
4198  }
4199  }
4200  if (!ifile->duration)
4201  ifile->time_base = ist->st->time_base;
4202  /* the total duration of the stream, max_pts - min_pts is
4203  * the duration of the stream without the last frame */
4204  duration += ist->max_pts - ist->min_pts;
4205  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4206  ifile->time_base);
4207  }
4208 
 /* loop > 0 counts down; loop < 0 means loop forever. */
4209  if (ifile->loop > 0)
4210  ifile->loop--;
4211 
4212  return ret;
4213 }
4214 
4215 /*
4216  * Return
4217  * - 0 -- one packet was read and processed
4218  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4219  * this function should be called again
4220  * - AVERROR_EOF -- this function should not be called again
4221  */
4222 static int process_input(int file_index)
4223 {
4224  InputFile *ifile = input_files[file_index];
4225  AVFormatContext *is;
4226  InputStream *ist;
4227  AVPacket pkt;
4228  int ret, i, j;
4229  int64_t duration;
4230  int64_t pkt_dts;
4231 
4232  is = ifile->ctx;
4233  ret = get_input_packet(ifile, &pkt);
4234 
4235  if (ret == AVERROR(EAGAIN)) {
4236  ifile->eagain = 1;
4237  return ret;
4238  }
4239  if (ret < 0 && ifile->loop) {
4240  ret = seek_to_start(ifile, is);
4241  if (ret < 0)
4242  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4243  else
4244  ret = get_input_packet(ifile, &pkt);
4245  if (ret == AVERROR(EAGAIN)) {
4246  ifile->eagain = 1;
4247  return ret;
4248  }
4249  }
4250  if (ret < 0) {
4251  if (ret != AVERROR_EOF) {
4252  print_error(is->filename, ret);
4253  if (exit_on_error)
4254  exit_program(1);
4255  }
4256 
4257  for (i = 0; i < ifile->nb_streams; i++) {
4258  ist = input_streams[ifile->ist_index + i];
4259  if (ist->decoding_needed) {
4260  ret = process_input_packet(ist, NULL, 0);
4261  if (ret>0)
4262  return 0;
4263  }
4264 
4265  /* mark all outputs that don't go through lavfi as finished */
4266  for (j = 0; j < nb_output_streams; j++) {
4267  OutputStream *ost = output_streams[j];
4268 
4269  if (ost->source_index == ifile->ist_index + i &&
4270  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4271  finish_output_stream(ost);
4272  }
4273  }
4274 
4275  ifile->eof_reached = 1;
4276  return AVERROR(EAGAIN);
4277  }
4278 
4279  reset_eagain();
4280 
4281  if (do_pkt_dump) {
4283  is->streams[pkt.stream_index]);
4284  }
4285  /* the following test is needed in case new streams appear
4286  dynamically in stream : we ignore them */
4287  if (pkt.stream_index >= ifile->nb_streams) {
4288  report_new_stream(file_index, &pkt);
4289  goto discard_packet;
4290  }
4291 
4292  ist = input_streams[ifile->ist_index + pkt.stream_index];
4293 
4294  ist->data_size += pkt.size;
4295  ist->nb_packets++;
4296 
4297  if (ist->discard)
4298  goto discard_packet;
4299 
4300  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4301  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4302  exit_program(1);
4303  }
4304 
4305  if (debug_ts) {
4306  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4307  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4311  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4312  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4313  av_ts2str(input_files[ist->file_index]->ts_offset),
4314  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4315  }
4316 
4317  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4318  int64_t stime, stime2;
4319  // Correcting starttime based on the enabled streams
4320  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4321  // so we instead do it here as part of discontinuity handling
4322  if ( ist->next_dts == AV_NOPTS_VALUE
4323  && ifile->ts_offset == -is->start_time
4324  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4325  int64_t new_start_time = INT64_MAX;
4326  for (i=0; i<is->nb_streams; i++) {
4327  AVStream *st = is->streams[i];
4328  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4329  continue;
4330  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4331  }
4332  if (new_start_time > is->start_time) {
4333  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4334  ifile->ts_offset = -new_start_time;
4335  }
4336  }
4337 
4338  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4339  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4340  ist->wrap_correction_done = 1;
4341 
4342  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4343  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4344  ist->wrap_correction_done = 0;
4345  }
4346  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4347  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4348  ist->wrap_correction_done = 0;
4349  }
4350  }
4351 
4352  /* add the stream-global side data to the first packet */
4353  if (ist->nb_packets == 1) {
4354  for (i = 0; i < ist->st->nb_side_data; i++) {
4355  AVPacketSideData *src_sd = &ist->st->side_data[i];
4356  uint8_t *dst_data;
4357 
4358  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4359  continue;
4360 
4361  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4362  continue;
4363 
4364  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4365  if (!dst_data)
4366  exit_program(1);
4367 
4368  memcpy(dst_data, src_sd->data, src_sd->size);
4369  }
4370  }
4371 
4372  if (pkt.dts != AV_NOPTS_VALUE)
4373  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4374  if (pkt.pts != AV_NOPTS_VALUE)
4375  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4376 
4377  if (pkt.pts != AV_NOPTS_VALUE)
4378  pkt.pts *= ist->ts_scale;
4379  if (pkt.dts != AV_NOPTS_VALUE)
4380  pkt.dts *= ist->ts_scale;
4381 
4383  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4385  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4386  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4387  int64_t delta = pkt_dts - ifile->last_ts;
4388  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4389  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4390  ifile->ts_offset -= delta;
4392  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4393  delta, ifile->ts_offset);
4394  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4395  if (pkt.pts != AV_NOPTS_VALUE)
4396  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4397  }
4398  }
4399 
4400  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4401  if (pkt.pts != AV_NOPTS_VALUE) {
4402  pkt.pts += duration;
4403  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4404  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4405  }
4406 
4407  if (pkt.dts != AV_NOPTS_VALUE)
4408  pkt.dts += duration;
4409 
4411  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4413  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4414  !copy_ts) {
4415  int64_t delta = pkt_dts - ist->next_dts;
4416  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4417  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4418  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4419  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4420  ifile->ts_offset -= delta;
4422  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4423  delta, ifile->ts_offset);
4424  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4425  if (pkt.pts != AV_NOPTS_VALUE)
4426  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4427  }
4428  } else {
4429  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4430  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4431  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4432  pkt.dts = AV_NOPTS_VALUE;
4433  }
4434  if (pkt.pts != AV_NOPTS_VALUE){
4435  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4436  delta = pkt_pts - ist->next_dts;
4437  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4438  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4439  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4440  pkt.pts = AV_NOPTS_VALUE;
4441  }
4442  }
4443  }
4444  }
4445 
4446  if (pkt.dts != AV_NOPTS_VALUE)
4447  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4448 
4449  if (debug_ts) {
4450  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4452  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4453  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4454  av_ts2str(input_files[ist->file_index]->ts_offset),
4455  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4456  }
4457 
4458  sub2video_heartbeat(ist, pkt.pts);
4459 
4460  process_input_packet(ist, &pkt, 0);
4461 
4462 discard_packet:
4463  av_packet_unref(&pkt);
4464 
4465  return 0;
4466 }
4467 
4468 /**
4469  * Perform a step of transcoding for the specified filter graph.
4470  *
4471  * @param[in] graph filter graph to consider
4472  * @param[out] best_ist input stream where a frame would allow to continue
4473  * @return 0 for success, <0 for error
4474  */
4475 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4476 {
4477  int i, ret;
4478  int nb_requests, nb_requests_max = 0;
4479  InputFilter *ifilter;
4480  InputStream *ist;
4481 
4482  *best_ist = NULL;
4483  ret = avfilter_graph_request_oldest(graph->graph);
4484  if (ret >= 0)
4485  return reap_filters(0);
4486 
4487  if (ret == AVERROR_EOF) {
4488  ret = reap_filters(1);
4489  for (i = 0; i < graph->nb_outputs; i++)
4490  close_output_stream(graph->outputs[i]->ost);
4491  return ret;
4492  }
4493  if (ret != AVERROR(EAGAIN))
4494  return ret;
4495 
4496  for (i = 0; i < graph->nb_inputs; i++) {
4497  ifilter = graph->inputs[i];
4498  ist = ifilter->ist;
4499  if (input_files[ist->file_index]->eagain ||
4500  input_files[ist->file_index]->eof_reached)
4501  continue;
4502  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4503  if (nb_requests > nb_requests_max) {
4504  nb_requests_max = nb_requests;
4505  *best_ist = ist;
4506  }
4507  }
4508 
4509  if (!*best_ist)
4510  for (i = 0; i < graph->nb_outputs; i++)
4511  graph->outputs[i]->ost->unavailable = 1;
4512 
4513  return 0;
4514 }
4515 
4516 /**
4517  * Run a single step of transcoding.
4518  *
4519  * @return 0 for success, <0 for error
4520  */
4521 static int transcode_step(void)
4522 {
4523  OutputStream *ost;
4524  InputStream *ist = NULL;
4525  int ret;
4526 
4527  ost = choose_output();
4528  if (!ost) {
4529  if (got_eagain()) {
4530  reset_eagain();
4531  av_usleep(10000);
4532  return 0;
4533  }
4534  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4535  return AVERROR_EOF;
4536  }
4537 
4538  if (ost->filter && !ost->filter->graph->graph) {
4540  ret = configure_filtergraph(ost->filter->graph);
4541  if (ret < 0) {
4542  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4543  return ret;
4544  }
4545  }
4546  }
4547 
4548  if (ost->filter && ost->filter->graph->graph) {
4549  if (!ost->initialized) {
4550  char error[1024] = {0};
4551  ret = init_output_stream(ost, error, sizeof(error));
4552  if (ret < 0) {
4553  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4554  ost->file_index, ost->index, error);
4555  exit_program(1);
4556  }
4557  }
4558  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4559  return ret;
4560  if (!ist)
4561  return 0;
4562  } else if (ost->filter) {
4563  int i;
4564  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4565  InputFilter *ifilter = ost->filter->graph->inputs[i];
4566  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4567  ist = ifilter->ist;
4568  break;
4569  }
4570  }
4571  if (!ist) {
4572  ost->inputs_done = 1;
4573  return 0;
4574  }
4575  } else {
4576  av_assert0(ost->source_index >= 0);
4577  ist = input_streams[ost->source_index];
4578  }
4579 
4580  ret = process_input(ist->file_index);
4581  if (ret == AVERROR(EAGAIN)) {
4582  if (input_files[ist->file_index]->eagain)
4583  ost->unavailable = 1;
4584  return 0;
4585  }
4586 
4587  if (ret < 0)
4588  return ret == AVERROR_EOF ? 0 : ret;
4589 
4590  return reap_filters(0);
4591 }
4592 
4593 /*
4594  * The following code is the main loop of the file converter
4595  */
4596 static int transcode(void)
4597 {
4598  int ret, i;
4599  AVFormatContext *os;
4600  OutputStream *ost;
4601  InputStream *ist;
4602  int64_t timer_start;
4603  int64_t total_packets_written = 0;
4604 
4605  ret = transcode_init();
4606  if (ret < 0)
4607  goto fail;
4608 
4609  if (stdin_interaction) {
4610  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4611  }
4612 
4613  timer_start = av_gettime_relative();
4614 
4615 #if HAVE_THREADS
4616  if ((ret = init_input_threads()) < 0)
4617  goto fail;
4618 #endif
4619 
4620  while (!received_sigterm) {
4621  int64_t cur_time= av_gettime_relative();
4622 
4623  /* if 'q' pressed, exits */
4624  if (stdin_interaction)
4625  if (check_keyboard_interaction(cur_time) < 0)
4626  break;
4627 
4628  /* check if there's any stream where output is still needed */
4629  if (!need_output()) {
4630  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4631  break;
4632  }
4633 
4634  ret = transcode_step();
4635  if (ret < 0 && ret != AVERROR_EOF) {
4636  char errbuf[128];
4637  av_strerror(ret, errbuf, sizeof(errbuf));
4638 
4639  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4640  break;
4641  }
4642 
4643  /* dump report by using the output first video and audio streams */
4644  print_report(0, timer_start, cur_time);
4645  }
4646 #if HAVE_THREADS
4647  free_input_threads();
4648 #endif
4649 
4650  /* at the end of stream, we must flush the decoder buffers */
4651  for (i = 0; i < nb_input_streams; i++) {
4652  ist = input_streams[i];
4653  if (!input_files[ist->file_index]->eof_reached) {
4654  process_input_packet(ist, NULL, 0);
4655  }
4656  }
4657  flush_encoders();
4658 
4659  term_exit();
4660 
4661  /* write the trailer if needed and close file */
4662  for (i = 0; i < nb_output_files; i++) {
4663  os = output_files[i]->ctx;
4664  if (!output_files[i]->header_written) {
4666  "Nothing was written into output file %d (%s), because "
4667  "at least one of its streams received no packets.\n",
4668  i, os->filename);
4669  continue;
4670  }
4671  if ((ret = av_write_trailer(os)) < 0) {
4672  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4673  if (exit_on_error)
4674  exit_program(1);
4675  }
4676  }
4677 
4678  /* dump report by using the first video and audio streams */
4679  print_report(1, timer_start, av_gettime_relative());
4680 
4681  /* close each encoder */
4682  for (i = 0; i < nb_output_streams; i++) {
4683  ost = output_streams[i];
4684  if (ost->encoding_needed) {
4685  av_freep(&ost->enc_ctx->stats_in);
4686  }
4687  total_packets_written += ost->packets_written;
4688  }
4689 
4690  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4691  av_log(NULL, AV_LOG_FATAL, "Empt