FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
/* Program identity reported by the cmdutils banner/version machinery. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); opened lazily,
 * closed during ffmpeg_cleanup(). */
static FILE *vstats_file;

/* Names of the variables usable in the -force_key_frames expression;
 * NULL-terminated, order matters to the expression evaluator. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
125 
/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);
/* NOTE(review): original line 129 is elided in this extraction
 * (likely another forward declaration) — confirm against upstream. */

static int run_as_daemon = 0;
static int nb_frames_dup = 0;          /* running count of duplicated frames */
static unsigned dup_warning = 1000;    /* threshold for the "frames duplicated" warning */
static int nb_frames_drop = 0;         /* running count of dropped frames */
static int64_t decode_error_stat[2];   /* [0]=frames decoded ok, [1]=decode errors — presumably; confirm */

static int want_sdp = 1;

static int current_time;               /* last timestamp taken by update_benchmark() */
/* NOTE(review): original lines 140-156 are partially elided in this
 * extraction (upstream declares progress I/O, subtitle_out buffer and the
 * input/output stream/file arrays here) — confirm against upstream. */




#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* terminal state saved by term_init() */
static int restore_tty;         /* nonzero once oldtty is valid and must be restored */
#endif

#if HAVE_PTHREADS
static void free_input_threads(void);
#endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
/* NOTE(review): the signature line (original line 173) is elided in this
 * extraction; from the body this is sub2video_get_blank_frame(InputStream *ist):
 * (re)allocate ist->sub2video.frame as a blank canvas and return 0 or a
 * negative AVERROR. Confirm against upstream. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* prefer the decoder's dimensions; fall back to the configured sub2video size */
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    /* NOTE(review): original line 181 is elided here (likely the assignment of
     * frame->format) — confirm against upstream. */
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* zero the first plane so the canvas starts blank */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
/* Stamp the sub2video canvas with the given pts and push it into every
 * filter this input stream feeds. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
    /* NOTE(review): the loop body (original lines 227-229) is elided in this
     * extraction — upstream it calls av_buffersrc_add_frame_flags() with
     * KEEP_REF|PUSH flags on each ist->filters[i]; confirm against upstream. */
}
231 
/* NOTE(review): the signature line (original line 232) is elided in this
 * extraction; upstream this is sub2video_update(InputStream *ist, AVSubtitle *sub):
 * render @sub (or a blank canvas when sub == NULL) into the sub2video frame
 * and push it downstream. Confirm against upstream. */
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* subtitle display times are in ms relative to sub->pts (AV_TIME_BASE units) */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* flush: emit an empty canvas starting where the last one ended */
        pts = ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the first line of this av_log() call (original line 254)
         * is elided in this extraction — confirm against upstream. */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
}
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
/* Flush sub2video at end of stream: emit a final blank canvas if the last
 * subtitle has not yet expired, then signal EOF to the feeding filters. */
static void sub2video_flush(InputStream *ist)
{
    int i;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
    /* NOTE(review): the loop body (original line 302) is elided in this
     * extraction — upstream it pushes a NULL frame (EOF) into each
     * ist->filters[i] via av_buffersrc_add_frame(); confirm against upstream. */
}
304 
305 /* end of sub2video hack */
306 
/* Async-signal-safe part of terminal teardown: restore the attributes that
 * term_init() saved, if we ever changed them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
314 
/* Public terminal teardown: silence logging, then restore the tty. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): original line 318 is elided in this extraction —
     * upstream it calls term_exit_sigsafe(); confirm against upstream. */
}
320 
/* Signal/exit bookkeeping shared between the signal handler and main loop. */
static volatile int received_sigterm = 0;    /* last signal number received */
static volatile int received_nb_signals = 0; /* how many signals arrived so far */
/* NOTE(review): original line 323 is elided in this extraction — upstream
 * declares the atomic transcode_init_done flag here; confirm. */
static volatile int ffmpeg_exited = 0;       /* set once cleanup has finished */
static int main_return_code = 0;             /* process exit code accumulated on errors */
326 
/* Signal handler: record the signal and hard-exit after repeated signals.
 * Only async-signal-safe calls (write, strlen, exit) are used here. */
static void
/* NOTE(review): elided in this extraction — original line 328 (the
 * `sigterm_handler(int sig)` signature) and lines 331-332 (upstream
 * increments received_nb_signals and sets the interrupt flag). Confirm. */
{
    received_sigterm = sig;
    if(received_nb_signals > 3) {
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));

        exit(123);
    }
}
340 
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: translate console events into the same
 * path the POSIX signal handler uses.
 *
 * @return TRUE when the event was handled, FALSE for unknown events.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    /* Ctrl+C / Ctrl+Break behave like SIGINT. */
    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
372 
/* Put the controlling terminal into raw-ish mode (so single key presses can
 * be read) and install the signal / console handlers. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    /* NOTE(review): original line 376 is elided in this extraction — upstream
     * opens an `if (!run_as_daemon && stdin_interaction) {` guard here, which
     * the closing brace before #endif below matches. Confirm. */
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        /* save current state so term_exit_sigsafe() can restore it */
        oldtty = tty;
        restore_tty = 1;

        /* disable input translation and flow control */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                         |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        /* no echo, no canonical (line-buffered) input */
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        /* return from read() after a single byte, no timeout */
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
406 
/* read a key without blocking */
/* Returns the byte read, or -1 when no input is pending; on the termios path
 * the raw read() result (0 or -1) is propagated when a select()-reported fd
 * yields no byte. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: poll stdin without blocking */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* lazily detect whether stdin is a console or a pipe */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
458 
/* AVIOInterruptCB callback: abort blocking I/O once a termination signal
 * has been received. */
static int decode_interrupt_cb(void *ctx)
{
    /* NOTE(review): the return statement (original line 461) is elided in this
     * extraction — upstream returns nonzero once received_nb_signals exceeds
     * the transcode-init state; confirm against upstream. */
}
463 
465 
/* Free all global state (filtergraphs, output/input files and streams),
 * close files and restore the terminal. Registered to run at exit; @ret is
 * the pending program exit status.
 * NOTE(review): numerous individual lines are elided in this extraction
 * (marked inline below) — confirm all of them against upstream. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): line 477 elided — likely avfilter_graph_free(&fg->graph); */
        for (j = 0; j < fg->nb_inputs; j++) {
            /* drain and free any frames still queued for this filter input */
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): line 489 elided — the av_fifo_generic_read(
                     * ...sub_queue, opening of this call */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                /* NOTE(review): line 493 elided — likely av_fifo_freep(&...sub_queue); */
            }
            /* NOTE(review): line 495 elided */
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* NOTE(review): line 514 elided — likely av_freep(&subtitle_out); */

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE(review): line 519 elided — the `AVFormatContext *s;` declaration
         * used just below */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        /* NOTE(review): line 525 elided — likely avformat_free_context(s); */
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);
        /* NOTE(review): line 539 elided */

        /* NOTE(review): line 541 elided — likely av_frame_free(&ost->filtered_frame); */
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_parser_close(ost->parser);
        /* NOTE(review): line 546 elided */

        av_freep(&ost->forced_keyframes);
        /* NOTE(review): line 549 elided — likely freeing the forced-keyframes expr */
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        /* NOTE(review): line 553 elided — likely av_freep(&ost->audio_channels_map); */
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        /* NOTE(review): lines 558-559 elided — likely freeing ost->enc_ctx */

        if (ost->muxing_queue) {
            /* drop any packets still buffered for the muxer */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            /* NOTE(review): line 567 elided — likely av_fifo_freep(&ost->muxing_queue); */
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): lines 582-583 elided — likely freeing decoded/filter frames */
        av_dict_free(&ist->decoder_opts);
        /* NOTE(review): lines 585-586 elided */
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        /* NOTE(review): line 591 elided — likely avcodec_free_context(&ist->dec_ctx); */

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): line 598 elided — the av_log(NULL, AV_LOG_ERROR,
             * opening of this call */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }
    /* NOTE(review): line 602 elided */

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    /* NOTE(review): line 611 elided — likely avformat_network_deinit(); */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
622 
/* NOTE(review): the signature line (original line 623) is elided in this
 * extraction — upstream this is remove_avoptions(AVDictionary **a,
 * AVDictionary *b): delete from *a every key present in b. Confirm. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        /* NOTE(review): the loop body (original line 628) is elided — upstream
         * removes t->key from *a via av_dict_set(a, t->key, NULL,
         * AV_DICT_MATCH_CASE); confirm. */
    }
}
631 
/* NOTE(review): the signature line (original line 632) is elided in this
 * extraction — upstream this is assert_avoptions(AVDictionary *m): abort if
 * any user option was left unconsumed. Confirm. */
{
    /* NOTE(review): the AVDictionaryEntry *t declaration (original line 634)
     * is elided here. */
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
640 
/* Abort the program after an experimental codec was requested without the
 * required strictness level; both parameters are unused in this body (the
 * diagnostic is printed by the caller — presumably; confirm). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
645 
646 static void update_benchmark(const char *fmt, ...)
647 {
648  if (do_benchmark_all) {
649  int64_t t = getutime();
650  va_list va;
651  char buf[1024];
652 
653  if (fmt) {
654  va_start(va, fmt);
655  vsnprintf(buf, sizeof(buf), fmt, va);
656  va_end(va);
657  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
658  }
659  current_time = t;
660  }
661 }
662 
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
664 {
665  int i;
666  for (i = 0; i < nb_output_streams; i++) {
667  OutputStream *ost2 = output_streams[i];
668  ost2->finished |= ost == ost2 ? this_stream : others;
669  }
670 }
671 
/* NOTE(review): the signature line (original line 672) is elided in this
 * extraction — upstream this is write_packet(OutputFile *of, AVPacket *pkt,
 * OutputStream *ost): fix up timestamps and hand the packet to the muxer,
 * or queue it if the muxer header is not written yet. Confirm. */
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue, but never past max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): line 685 elided — the av_log(NULL, AV_LOG_ERROR,
                 * opening of this call */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_ref(&tmp_pkt, pkt);
        if (ret < 0)
            exit_program(1);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        av_packet_unref(pkt);
        return;
    }

    /* NOTE(review): lines 702-703 elided — upstream this is the condition
     * (video with VSYNC_DROP, or audio with negative audio_sync_method)
     * guarding the timestamp reset below. Confirm. */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): line 722 elided — upstream fetches quality-stats packet
         * side data into `sd` here; the dangling `NULL);` below is the tail of
         * that call. Confirm. */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts: rebuild both from a median-style guess */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): line 756 elided — the opening `if ((st->codecpar->...`
         * of the monotonicity check whose remaining conditions follow. Confirm. */
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* enforce (strictly, unless TS_NONSTRICT) increasing dts */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): line 789 elided — the media-type-string argument
                * of this av_log call. Confirm. */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): line 800 elided — upstream closes all output streams
         * here on mux failure. Confirm. */
    }
    av_packet_unref(pkt);
}
804 
/* NOTE(review): the signature line (original line 805) is elided in this
 * extraction — upstream this is close_output_stream(OutputStream *ost):
 * mark the encoder finished and, under -shortest, clamp the file's
 * recording time to this stream's end. Confirm. */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        /* convert this stream's end position to AV_TIME_BASE units */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
815 
/* NOTE(review): the signature line (original line 816) is elided in this
 * extraction — upstream this is output_packet(OutputFile *of, AVPacket *pkt,
 * OutputStream *ost): run the packet through the stream's bitstream-filter
 * chain (if any) and hand each resulting packet to write_packet(). Confirm. */
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
        if (ret < 0)
            goto finish;

        /* idx tracks how deep in the chain we are pulling from; EAGAIN moves
           us back up one filter, output moves us down or to the muxer */
        idx = 1;
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                idx--;
                continue;
            } else if (ret < 0)
                goto finish;
            /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
             * the api states this shouldn't happen after init(). Propagate it here to the
             * muxer and to the next filters in the chain to workaround this.
             * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
             * par_out->extradata and adapt muxers accordingly to get rid of this. */
            if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
                ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
                if (ret < 0)
                    goto finish;
                ost->bsf_extradata_updated[idx - 1] |= 1;
            }

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                /* HACK/FIXME! - See above */
                if (!(ost->bsf_extradata_updated[idx] & 2)) {
                    ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                    if (ret < 0)
                        goto finish;
                    ost->bsf_extradata_updated[idx] |= 2;
                }
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                if (ret < 0)
                    goto finish;
                idx++;
            } else
                write_packet(of, pkt, ost);
        }
    } else
        write_packet(of, pkt, ost);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
877 
/* NOTE(review): the signature line (original line 878) is elided in this
 * extraction — upstream this is check_recording_time(OutputStream *ost):
 * return 0 (and close the stream) once -t recording time is reached,
 * 1 while encoding may continue. Confirm. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        /* NOTE(review): line 883 elided — the av_compare_ts(...) call comparing
         * the stream position against of->recording_time; the dangling argument
         * below is its tail. Confirm. */
        AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;
    }
    return 1;
}
890 
/* Encode one audio frame with the stream's encoder and mux every packet it
 * produces. Aborts the program on encoder failure. */
static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* frames without a pts (or with audio sync disabled) are stamped with the
       running sample counter */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    /* NOTE(review): original line 912 is elided in this extraction — upstream
     * it calls update_benchmark(NULL); confirm. */
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* drain every packet the encoder has ready; EAGAIN means it needs more input */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
950 
/* Encode one subtitle and mux the resulting packet(s). DVB subtitles are
 * emitted twice: once to draw, once (with no rects) to clear. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* NOTE(review): original line 981 is elided in this extraction — upstream
     * it is the `if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)` condition
     * governing the else below. Confirm. */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* the second (clear) packet carries no rectangles */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost);
    }
}
1033 
1034 static void do_video_out(OutputFile *of,
1035  OutputStream *ost,
1036  AVFrame *next_picture,
1037  double sync_ipts)
1038 {
1039  int ret, format_video_sync;
1040  AVPacket pkt;
1041  AVCodecContext *enc = ost->enc_ctx;
1042  AVCodecParameters *mux_par = ost->st->codecpar;
1043  AVRational frame_rate;
1044  int nb_frames, nb0_frames, i;
1045  double delta, delta0;
1046  double duration = 0;
1047  int frame_size = 0;
1048  InputStream *ist = NULL;
1050 
1051  if (ost->source_index >= 0)
1052  ist = input_streams[ost->source_index];
1053 
1054  frame_rate = av_buffersink_get_frame_rate(filter);
1055  if (frame_rate.num > 0 && frame_rate.den > 0)
1056  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1057 
1058  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1059  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1060 
1061  if (!ost->filters_script &&
1062  !ost->filters &&
1063  next_picture &&
1064  ist &&
1065  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1066  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1067  }
1068 
1069  if (!next_picture) {
1070  //end, flushing
1071  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1072  ost->last_nb0_frames[1],
1073  ost->last_nb0_frames[2]);
1074  } else {
1075  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1076  delta = delta0 + duration;
1077 
1078  /* by default, we output a single frame */
1079  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1080  nb_frames = 1;
1081 
1082  format_video_sync = video_sync_method;
1083  if (format_video_sync == VSYNC_AUTO) {
1084  if(!strcmp(of->ctx->oformat->name, "avi")) {
1085  format_video_sync = VSYNC_VFR;
1086  } else
1087  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1088  if ( ist
1089  && format_video_sync == VSYNC_CFR
1090  && input_files[ist->file_index]->ctx->nb_streams == 1
1091  && input_files[ist->file_index]->input_ts_offset == 0) {
1092  format_video_sync = VSYNC_VSCFR;
1093  }
1094  if (format_video_sync == VSYNC_CFR && copy_ts) {
1095  format_video_sync = VSYNC_VSCFR;
1096  }
1097  }
1098  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1099 
1100  if (delta0 < 0 &&
1101  delta > 0 &&
1102  format_video_sync != VSYNC_PASSTHROUGH &&
1103  format_video_sync != VSYNC_DROP) {
1104  if (delta0 < -0.6) {
1105  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1106  } else
1107  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1108  sync_ipts = ost->sync_opts;
1109  duration += delta0;
1110  delta0 = 0;
1111  }
1112 
1113  switch (format_video_sync) {
1114  case VSYNC_VSCFR:
1115  if (ost->frame_number == 0 && delta0 >= 0.5) {
1116  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1117  delta = duration;
1118  delta0 = 0;
1119  ost->sync_opts = lrint(sync_ipts);
1120  }
1121  case VSYNC_CFR:
1122  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1123  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1124  nb_frames = 0;
1125  } else if (delta < -1.1)
1126  nb_frames = 0;
1127  else if (delta > 1.1) {
1128  nb_frames = lrintf(delta);
1129  if (delta0 > 1.1)
1130  nb0_frames = lrintf(delta0 - 0.6);
1131  }
1132  break;
1133  case VSYNC_VFR:
1134  if (delta <= -0.6)
1135  nb_frames = 0;
1136  else if (delta > 0.6)
1137  ost->sync_opts = lrint(sync_ipts);
1138  break;
1139  case VSYNC_DROP:
1140  case VSYNC_PASSTHROUGH:
1141  ost->sync_opts = lrint(sync_ipts);
1142  break;
1143  default:
1144  av_assert0(0);
1145  }
1146  }
1147 
1148  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1149  nb0_frames = FFMIN(nb0_frames, nb_frames);
1150 
1151  memmove(ost->last_nb0_frames + 1,
1152  ost->last_nb0_frames,
1153  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1154  ost->last_nb0_frames[0] = nb0_frames;
1155 
1156  if (nb0_frames == 0 && ost->last_dropped) {
1157  nb_frames_drop++;
1159  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1160  ost->frame_number, ost->st->index, ost->last_frame->pts);
1161  }
1162  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1163  if (nb_frames > dts_error_threshold * 30) {
1164  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1165  nb_frames_drop++;
1166  return;
1167  }
1168  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1169  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1170  if (nb_frames_dup > dup_warning) {
1171  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1172  dup_warning *= 10;
1173  }
1174  }
1175  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1176 
1177  /* duplicates frame if needed */
1178  for (i = 0; i < nb_frames; i++) {
1179  AVFrame *in_picture;
1180  av_init_packet(&pkt);
1181  pkt.data = NULL;
1182  pkt.size = 0;
1183 
1184  if (i < nb0_frames && ost->last_frame) {
1185  in_picture = ost->last_frame;
1186  } else
1187  in_picture = next_picture;
1188 
1189  if (!in_picture)
1190  return;
1191 
1192  in_picture->pts = ost->sync_opts;
1193 
1194 #if 1
1195  if (!check_recording_time(ost))
1196 #else
1197  if (ost->frame_number >= ost->max_frames)
1198 #endif
1199  return;
1200 
1201 #if FF_API_LAVF_FMT_RAWPICTURE
1202  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1203  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1204  /* raw pictures are written as AVPicture structure to
1205  avoid any copies. We support temporarily the older
1206  method. */
1207  if (in_picture->interlaced_frame)
1208  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1209  else
1210  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1211  pkt.data = (uint8_t *)in_picture;
1212  pkt.size = sizeof(AVPicture);
1213  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1214  pkt.flags |= AV_PKT_FLAG_KEY;
1215 
1216  output_packet(of, &pkt, ost);
1217  } else
1218 #endif
1219  {
1220  int forced_keyframe = 0;
1221  double pts_time;
1222 
1224  ost->top_field_first >= 0)
1225  in_picture->top_field_first = !!ost->top_field_first;
1226 
1227  if (in_picture->interlaced_frame) {
1228  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1229  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1230  else
1231  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1232  } else
1233  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1234 
1235  in_picture->quality = enc->global_quality;
1236  in_picture->pict_type = 0;
1237 
1238  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239  in_picture->pts * av_q2d(enc->time_base) : NAN;
1240  if (ost->forced_kf_index < ost->forced_kf_count &&
1241  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242  ost->forced_kf_index++;
1243  forced_keyframe = 1;
1244  } else if (ost->forced_keyframes_pexpr) {
1245  double res;
1246  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1249  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1255  res);
1256  if (res) {
1257  forced_keyframe = 1;
1263  }
1264 
1266  } else if ( ost->forced_keyframes
1267  && !strncmp(ost->forced_keyframes, "source", 6)
1268  && in_picture->key_frame==1) {
1269  forced_keyframe = 1;
1270  }
1271 
1272  if (forced_keyframe) {
1273  in_picture->pict_type = AV_PICTURE_TYPE_I;
1274  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1275  }
1276 
1278  if (debug_ts) {
1279  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282  enc->time_base.num, enc->time_base.den);
1283  }
1284 
1285  ost->frames_encoded++;
1286 
1287  ret = avcodec_send_frame(enc, in_picture);
1288  if (ret < 0)
1289  goto error;
1290 
1291  while (1) {
1292  ret = avcodec_receive_packet(enc, &pkt);
1293  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1294  if (ret == AVERROR(EAGAIN))
1295  break;
1296  if (ret < 0)
1297  goto error;
1298 
1299  if (debug_ts) {
1300  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1301  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1302  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1303  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1304  }
1305 
1306  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1307  pkt.pts = ost->sync_opts;
1308 
1309  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1310 
1311  if (debug_ts) {
1312  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1313  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1314  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1315  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1316  }
1317 
1318  frame_size = pkt.size;
1319  output_packet(of, &pkt, ost);
1320 
1321  /* if two pass, output log */
1322  if (ost->logfile && enc->stats_out) {
1323  fprintf(ost->logfile, "%s", enc->stats_out);
1324  }
1325  }
1326  }
1327  ost->sync_opts++;
1328  /*
1329  * For video, number of frames in == number of packets out.
1330  * But there may be reordering, so we can't throw away frames on encoder
1331  * flush, we need to limit them here, before they go into encoder.
1332  */
1333  ost->frame_number++;
1334 
1335  if (vstats_filename && frame_size)
1336  do_video_stats(ost, frame_size);
1337  }
1338 
1339  if (!ost->last_frame)
1340  ost->last_frame = av_frame_alloc();
1341  av_frame_unref(ost->last_frame);
1342  if (next_picture && ost->last_frame)
1343  av_frame_ref(ost->last_frame, next_picture);
1344  else
1345  av_frame_free(&ost->last_frame);
1346 
1347  return;
1348 error:
1349  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1350  exit_program(1);
1351 }
1352 
1353 static double psnr(double d)
1354 {
1355  return -10.0 * log10(d);
1356 }
1357 
/* Append one line of per-frame video encoding statistics to the -vstats log
 * file (the file is opened lazily on first call and kept open).
 * NOTE(review): the function's signature line (listing line 1358) is missing
 * from this extract; presumably do_video_stats(OutputStream *ost, int frame_size)
 * -- confirm against the upstream source. */
1359 {
1360  AVCodecContext *enc;
1361  int frame_number;
1362  double ti1, bitrate, avg_bitrate;
1363 
1364  /* this is executed just the first time do_video_stats is called */
1365  if (!vstats_file) {
1366  vstats_file = fopen(vstats_filename, "w");
1367  if (!vstats_file) {
1368  perror("fopen");
1369  exit_program(1);
1370  }
1371  }
1372 
1373  enc = ost->enc_ctx;
1374  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
     /* vstats_version selects between the legacy and the extended
      * (file/stream-index-prefixed) log line format */
1375  frame_number = ost->st->nb_frames;
1376  if (vstats_version <= 1) {
1377  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1378  ost->quality / (float)FF_QP2LAMBDA);
1379  } else {
1380  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1381  ost->quality / (float)FF_QP2LAMBDA);
1382  }
1383 
1384  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1385  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1386 
1387  fprintf(vstats_file,"f_size= %6d ", frame_size);
1388  /* compute pts value */
1389  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1390  if (ti1 < 0.01)
     /* clamp to avoid division by ~0 in the average bitrate below */
1391  ti1 = 0.01;
1392 
     /* instantaneous bitrate: bits of this frame over one encoder
      * time-base tick, in kbit/s; avg over the whole stream so far */
1393  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1394  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1395  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1396  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1397  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1398  }
1399 }
1400 
1401 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1402 
/* Mark an output stream (and, with -shortest, all streams of its file) as
 * done for both the encoder and the muxer.
 * NOTE(review): the signature line (listing 1403) and listing line 1408 are
 * missing from this extract; 1408 presumably sets ost->finished itself --
 * confirm against the upstream source. */
1404 {
1405  OutputFile *of = output_files[ost->file_index];
1406  int i;
1407 
1409 
1410  if (of->shortest) {
     /* -shortest: ending one stream ends every stream of this output file */
1411  for (i = 0; i < of->ctx->nb_streams; i++)
1412  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1413  }
1414 }
1415 
1416 /**
1417  * Get and encode new output from any of the filtergraphs, without causing
1418  * activity.
1419  *
1420  * @return 0 for success, <0 for severe errors
1421  */
1422 static int reap_filters(int flush)
1423 {
1424  AVFrame *filtered_frame = NULL;
1425  int i;
1426 
1427  /* Reap all buffers present in the buffer sinks */
1428  for (i = 0; i < nb_output_streams; i++) {
1429  OutputStream *ost = output_streams[i];
1430  OutputFile *of = output_files[ost->file_index];
     /* NOTE(review): listing line 1431 is missing here; presumably the
      * declaration of the local buffersink context assigned below -- confirm. */
1432  AVCodecContext *enc = ost->enc_ctx;
1433  int ret = 0;
1434 
     /* skip streams with no (configured) filtergraph */
1435  if (!ost->filter || !ost->filter->graph->graph)
1436  continue;
1437  filter = ost->filter->filter;
1438 
     /* lazily initialize the output stream the first time data arrives */
1439  if (!ost->initialized) {
1440  char error[1024] = "";
1441  ret = init_output_stream(ost, error, sizeof(error));
1442  if (ret < 0) {
1443  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1444  ost->file_index, ost->index, error);
1445  exit_program(1);
1446  }
1447  }
1448 
1449  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1450  return AVERROR(ENOMEM);
1451  }
1452  filtered_frame = ost->filtered_frame;
1453 
     /* drain every frame currently buffered in this sink */
1454  while (1) {
1455  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
     /* NOTE(review): the continuation line with the buffersink flags argument
      * (listing 1457) is missing from this extract. */
1456  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1458  if (ret < 0) {
1459  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1461  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1462  } else if (flush && ret == AVERROR_EOF) {
     /* on EOF during a flush, push a NULL frame to drain the video encoder */
1464  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1465  }
1466  break;
1467  }
1468  if (ost->finished) {
1469  av_frame_unref(filtered_frame);
1470  continue;
1471  }
1472  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1473  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1474  AVRational filter_tb = av_buffersink_get_time_base(filter);
1475  AVRational tb = enc->time_base;
     /* widen the timebase denominator (up to 16 extra bits) so float_pts
      * keeps more fractional precision than the integer rescale below */
1476  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1477 
1478  tb.den <<= extra_bits;
1479  float_pts =
1480  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1481  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1482  float_pts /= 1 << extra_bits;
1483  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1484  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1485 
1486  filtered_frame->pts =
1487  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1488  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1489  }
1490  //if (ost->source_index >= 0)
1491  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1492 
1493  switch (av_buffersink_get_type(filter)) {
1494  case AVMEDIA_TYPE_VIDEO:
1495  if (!ost->frame_aspect_ratio.num)
1496  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497 
1498  if (debug_ts) {
1499  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501  float_pts,
1502  enc->time_base.num, enc->time_base.den);
1503  }
1504 
1505  do_video_out(of, ost, filtered_frame, float_pts);
1506  break;
1507  case AVMEDIA_TYPE_AUDIO:
1508  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509  enc->channels != filtered_frame->channels) {
     /* NOTE(review): the av_log(...) opening line (listing 1510) is missing
      * from this extract; only the message string continuation remains. */
1511  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512  break;
1513  }
1514  do_audio_out(of, ost, filtered_frame);
1515  break;
1516  default:
1517  // TODO support subtitle filters
1518  av_assert0(0);
1519  }
1520 
1521  av_frame_unref(filtered_frame);
1522  }
1523  }
1524 
1525  return 0;
1526 }
1527 
/* Print the end-of-run summary: per-media-type byte totals, muxing overhead,
 * and verbose per-stream demux/decode and mux/encode statistics. */
1528 static void print_final_stats(int64_t total_size)
1529 {
1530  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531  uint64_t subtitle_size = 0;
1532  uint64_t data_size = 0;
1533  float percent = -1.0;
1534  int i, j;
1535  int pass1_used = 1;
1536 
1537  for (i = 0; i < nb_output_streams; i++) {
1538  OutputStream *ost = output_streams[i];
1539  switch (ost->enc_ctx->codec_type) {
1540  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543  default: other_size += ost->data_size; break;
1544  }
1545  extra_size += ost->enc_ctx->extradata_size;
1546  data_size += ost->data_size;
     /* NOTE(review): listing lines 1547-1548 are missing from this extract;
      * presumably the condition that clears pass1_used when no stream used
      * two-pass pass 1 -- confirm against the upstream source. */
1549  pass1_used = 0;
1550  }
1551 
     /* muxing overhead as a percentage of the payload bytes */
1552  if (data_size && total_size>0 && total_size >= data_size)
1553  percent = 100.0 * (total_size - data_size) / data_size;
1554 
1555  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556  video_size / 1024.0,
1557  audio_size / 1024.0,
1558  subtitle_size / 1024.0,
1559  other_size / 1024.0,
1560  extra_size / 1024.0);
1561  if (percent >= 0.0)
1562  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563  else
1564  av_log(NULL, AV_LOG_INFO, "unknown");
1565  av_log(NULL, AV_LOG_INFO, "\n");
1566 
1567  /* print verbose per-stream stats */
1568  for (i = 0; i < nb_input_files; i++) {
1569  InputFile *f = input_files[i];
1570  uint64_t total_packets = 0, total_size = 0;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573  i, f->ctx->filename);
1574 
1575  for (j = 0; j < f->nb_streams; j++) {
1576  InputStream *ist = input_streams[f->ist_index + j];
1577  enum AVMediaType type = ist->dec_ctx->codec_type;
1578 
1579  total_size += ist->data_size;
1580  total_packets += ist->nb_packets;
1581 
1582  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583  i, j, media_type_string(type));
1584  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585  ist->nb_packets, ist->data_size);
1586 
1587  if (ist->decoding_needed) {
1588  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589  ist->frames_decoded);
1590  if (type == AVMEDIA_TYPE_AUDIO)
1591  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592  av_log(NULL, AV_LOG_VERBOSE, "; ");
1593  }
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "\n");
1596  }
1597 
1598  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599  total_packets, total_size);
1600  }
1601 
1602  for (i = 0; i < nb_output_files; i++) {
1603  OutputFile *of = output_files[i];
1604  uint64_t total_packets = 0, total_size = 0;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607  i, of->ctx->filename);
1608 
1609  for (j = 0; j < of->ctx->nb_streams; j++) {
1610  OutputStream *ost = output_streams[of->ost_index + j];
1611  enum AVMediaType type = ost->enc_ctx->codec_type;
1612 
1613  total_size += ost->data_size;
1614  total_packets += ost->packets_written;
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617  i, j, media_type_string(type));
1618  if (ost->encoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620  ost->frames_encoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627  ost->packets_written, ost->data_size);
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "\n");
1630  }
1631 
1632  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633  total_packets, total_size);
1634  }
     /* warn when the run produced no encoded bytes at all; point at -ss/-t
      * unless a two-pass first pass explains the empty output */
1635  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637  if (pass1_used) {
1638  av_log(NULL, AV_LOG_WARNING, "\n");
1639  } else {
1640  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1641  }
1642  }
1643 }
1644 
/* Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when enabled, machine-readable key=value pairs to the
 * -progress AVIO. Throttled to one update per 500ms unless is_last_report. */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 {
1647  char buf[1024];
1648  AVBPrint buf_script;
1649  OutputStream *ost;
1650  AVFormatContext *oc;
1651  int64_t total_size;
1652  AVCodecContext *enc;
1653  int frame_number, vid, i;
1654  double bitrate;
1655  double speed;
1656  int64_t pts = INT64_MIN + 1;
1657  static int64_t last_time = -1;
1658  static int qp_histogram[52];
1659  int hours, mins, secs, us;
1660  int ret;
1661  float t;
1662 
1663  if (!print_stats && !is_last_report && !progress_avio)
1664  return;
1665 
     /* rate-limit intermediate reports to every 500ms of wall clock */
1666  if (!is_last_report) {
1667  if (last_time == -1) {
1668  last_time = cur_time;
1669  return;
1670  }
1671  if ((cur_time - last_time) < 500000)
1672  return;
1673  last_time = cur_time;
1674  }
1675 
1676  t = (cur_time-timer_start) / 1000000.0;
1677 
1678 
1679  oc = output_files[0]->ctx;
1680 
1681  total_size = avio_size(oc->pb);
1682  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683  total_size = avio_tell(oc->pb);
1684 
1685  buf[0] = '\0';
1686  vid = 0;
1687  av_bprint_init(&buf_script, 0, 1);
1688  for (i = 0; i < nb_output_streams; i++) {
1689  float q = -1;
1690  ost = output_streams[i];
1691  enc = ost->enc_ctx;
1692  if (!ost->stream_copy)
1693  q = ost->quality / (float) FF_QP2LAMBDA;
1694 
     /* secondary video streams only report their quantizer */
1695  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1697  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698  ost->file_index, ost->index, q);
1699  }
     /* the first video stream drives the frame/fps/qp-hist/PSNR fields */
1700  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701  float fps;
1702 
1703  frame_number = ost->frame_number;
1704  fps = t > 1 ? frame_number / t : 0;
1705  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1706  frame_number, fps < 9.95, fps, q);
1707  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1709  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710  ost->file_index, ost->index, q);
1711  if (is_last_report)
1712  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1713  if (qp_hist) {
1714  int j;
1715  int qp = lrintf(q);
1716  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717  qp_histogram[qp]++;
1718  for (j = 0; j < 32; j++)
1719  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1720  }
1721 
1722  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723  int j;
1724  double error, error_sum = 0;
1725  double scale, scale_sum = 0;
1726  double p;
1727  char type[3] = { 'Y','U','V' };
1728  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1729  for (j = 0; j < 3; j++) {
1730  if (is_last_report) {
1731  error = enc->error[j];
1732  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733  } else {
1734  error = ost->error[j];
1735  scale = enc->width * enc->height * 255.0 * 255.0;
1736  }
     /* chroma planes are quarter-sized */
1737  if (j)
1738  scale /= 4;
1739  error_sum += error;
1740  scale_sum += scale;
1741  p = psnr(error / scale);
1742  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1743  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744  ost->file_index, ost->index, type[j] | 32, p);
1745  }
1746  p = psnr(error_sum / scale_sum);
1747  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1748  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749  ost->file_index, ost->index, p);
1750  }
1751  vid = 1;
1752  }
1753  /* compute min output value */
     /* NOTE(review): listing line 1754 is missing from this extract;
      * presumably the guard checking av_stream_get_end_pts() validity
      * before the FFMAX below -- confirm against the upstream source. */
1755  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1756  ost->st->time_base, AV_TIME_BASE_Q));
1757  if (is_last_report)
1758  nb_frames_drop += ost->last_dropped;
1759  }
1760 
1761  secs = FFABS(pts) / AV_TIME_BASE;
1762  us = FFABS(pts) % AV_TIME_BASE;
1763  mins = secs / 60;
1764  secs %= 60;
1765  hours = mins / 60;
1766  mins %= 60;
1767 
1768  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1769  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1770 
1771  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1772  "size=N/A time=");
1773  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774  "size=%8.0fkB time=", total_size / 1024.0);
1775  if (pts < 0)
1776  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1777  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1778  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1779  (100 * us) / AV_TIME_BASE);
1780 
1781  if (bitrate < 0) {
1782  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1783  av_bprintf(&buf_script, "bitrate=N/A\n");
1784  }else{
1785  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1786  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1787  }
1788 
1789  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1792  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1793  hours, mins, secs, us);
1794 
     /* NOTE(review): listing lines 1795 and 1797 are missing from this
      * extract; presumably the dup/drop condition and the snprintf argument
      * line (nb_frames_dup, nb_frames_drop) -- confirm upstream. */
1796  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1798  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1799  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1800 
1801  if (speed < 0) {
1802  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1803  av_bprintf(&buf_script, "speed=N/A\n");
1804  } else {
1805  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1806  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1807  }
1808 
     /* '\r' keeps intermediate reports on one line; '\n' terminates the run */
1809  if (print_stats || is_last_report) {
1810  const char end = is_last_report ? '\n' : '\r';
1811  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1812  fprintf(stderr, "%s %c", buf, end);
1813  } else
1814  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1815 
1816  fflush(stderr);
1817  }
1818 
1819  if (progress_avio) {
1820  av_bprintf(&buf_script, "progress=%s\n",
1821  is_last_report ? "end" : "continue");
1822  avio_write(progress_avio, buf_script.str,
1823  FFMIN(buf_script.len, buf_script.size - 1));
1824  avio_flush(progress_avio);
1825  av_bprint_finalize(&buf_script, NULL);
1826  if (is_last_report) {
1827  if ((ret = avio_closep(&progress_avio)) < 0)
     /* NOTE(review): the av_log(...) opening line (listing 1828) is missing
      * from this extract; only the message continuation remains. */
1829  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1830  }
1831  }
1832 
1833  if (is_last_report)
1834  print_final_stats(total_size);
1835 }
1836 
/* Drain every encoder at end of input: send a NULL frame to each encoding
 * output stream and mux all packets the encoder still holds. Also initializes
 * streams that never received any data so they can be finished cleanly. */
1837 static void flush_encoders(void)
1838 {
1839  int i, ret;
1840 
1841  for (i = 0; i < nb_output_streams; i++) {
1842  OutputStream *ost = output_streams[i];
1843  AVCodecContext *enc = ost->enc_ctx;
1844  OutputFile *of = output_files[ost->file_index];
1845 
1846  if (!ost->encoding_needed)
1847  continue;
1848 
1849  // Try to enable encoding with no input frames.
1850  // Maybe we should just let encoding fail instead.
1851  if (!ost->initialized) {
1852  FilterGraph *fg = ost->filter->graph;
1853  char error[1024] = "";
1854 
     /* NOTE(review): the av_log(...) opening line (listing 1855) is missing
      * from this extract; only the message continuation remains. */
1856  "Finishing stream %d:%d without any data written to it.\n",
1857  ost->file_index, ost->st->index);
1858 
1859  if (ost->filter && !fg->graph) {
1860  int x;
1861  for (x = 0; x < fg->nb_inputs; x++) {
1862  InputFilter *ifilter = fg->inputs[x];
1863  if (ifilter->format < 0) {
1864  AVCodecParameters *par = ifilter->ist->st->codecpar;
1865  // We never got any input. Set a fake format, which will
1866  // come from libavformat.
1867  ifilter->format = par->format;
1868  ifilter->sample_rate = par->sample_rate;
1869  ifilter->channels = par->channels;
1870  ifilter->channel_layout = par->channel_layout;
1871  ifilter->width = par->width;
1872  ifilter->height = par->height;
1873  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1874  }
1875  }
1876 
     /* NOTE(review): listing line 1877 is missing from this extract;
      * presumably the condition guarding the 'continue' below -- confirm. */
1878  continue;
1879 
1880  ret = configure_filtergraph(fg);
1881  if (ret < 0) {
1882  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1883  exit_program(1);
1884  }
1885 
1886  finish_output_stream(ost);
1887  }
1888 
1889  ret = init_output_stream(ost, error, sizeof(error));
1890  if (ret < 0) {
1891  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1892  ost->file_index, ost->index, error);
1893  exit_program(1);
1894  }
1895  }
1896 
     /* raw (frame_size<=1) audio codecs have nothing buffered to flush */
1897  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1898  continue;
1899 #if FF_API_LAVF_FMT_RAWPICTURE
1900  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1901  continue;
1902 #endif
1903 
     /* NOTE(review): listing line 1904 is missing from this extract;
      * presumably a media-type guard for the 'continue' below -- confirm. */
1905  continue;
1906 
1907  for (;;) {
1908  const char *desc = NULL;
1909  AVPacket pkt;
1910  int pkt_size;
1911 
1912  switch (enc->codec_type) {
1913  case AVMEDIA_TYPE_AUDIO:
1914  desc = "audio";
1915  break;
1916  case AVMEDIA_TYPE_VIDEO:
1917  desc = "video";
1918  break;
1919  default:
1920  av_assert0(0);
1921  }
1922 
1923  av_init_packet(&pkt);
1924  pkt.data = NULL;
1925  pkt.size = 0;
1926 
     /* NOTE(review): listing line 1927 is missing from this extract. */
1928 
     /* EAGAIN from receive means the encoder wants (more) flush input:
      * feed it NULL frames until packets come out or it reports EOF */
1929  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1930  ret = avcodec_send_frame(enc, NULL);
1931  if (ret < 0) {
1932  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1933  desc,
1934  av_err2str(ret));
1935  exit_program(1);
1936  }
1937  }
1938 
1939  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1940  if (ret < 0 && ret != AVERROR_EOF) {
1941  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1942  desc,
1943  av_err2str(ret));
1944  exit_program(1);
1945  }
1946  if (ost->logfile && enc->stats_out) {
1947  fprintf(ost->logfile, "%s", enc->stats_out);
1948  }
1949  if (ret == AVERROR_EOF) {
1950  break;
1951  }
1952  if (ost->finished & MUXER_FINISHED) {
1953  av_packet_unref(&pkt);
1954  continue;
1955  }
1956  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1957  pkt_size = pkt.size;
1958  output_packet(of, &pkt, ost);
     /* NOTE(review): listing line 1959 is missing from this extract;
      * presumably the vstats/video condition guarding the call below. */
1960  do_video_stats(ost, pkt_size);
1961  }
1962  }
1963  }
1964 }
1965 
1966 /*
1967  * Check whether a packet from ist should be written into ost at this time
1968  */
/* Return 1 when a packet from the given input stream should be written to
 * this output stream now, 0 otherwise (wrong source, stream finished, or
 * before the output file's start time).
 * NOTE(review): the signature line (listing 1969) is missing from this
 * extract; per the comment above, parameters are the InputStream *ist and
 * OutputStream *ost -- confirm against the upstream source. */
1970 {
1971  OutputFile *of = output_files[ost->file_index];
1972  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1973 
1974  if (ost->source_index != ist_index)
1975  return 0;
1976 
1977  if (ost->finished)
1978  return 0;
1979 
1980  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1981  return 0;
1982 
1983  return 1;
1984 }
1985 
/* Copy one input packet to an output stream without re-encoding: apply
 * start-time/recording-time trimming, rescale timestamps into the muxer
 * timebase, optionally run the parser-based bitstream fixup, and mux it. */
1986 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1987 {
1988  OutputFile *of = output_files[ost->file_index];
1989  InputFile *f = input_files [ist->file_index];
1990  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1991  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1992  AVPicture pict;
1993  AVPacket opkt;
1994 
1995  av_init_packet(&opkt);
1996 
     /* NOTE(review): listing line 1998 is missing from this extract;
      * presumably the second half of this keyframe-wait condition. */
1997  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1999  return;
2000 
     /* drop packets that lie before the effective copy start point */
2001  if (!ost->frame_number && !ost->copy_prior_start) {
2002  int64_t comp_start = start_time;
2003  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2004  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2005  if (pkt->pts == AV_NOPTS_VALUE ?
2006  ist->pts < comp_start :
2007  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2008  return;
2009  }
2010 
     /* stop this output stream once the -t recording window is exceeded */
2011  if (of->recording_time != INT64_MAX &&
2012  ist->pts >= of->recording_time + start_time) {
2013  close_output_stream(ost);
2014  return;
2015  }
2016 
2017  if (f->recording_time != INT64_MAX) {
2018  start_time = f->ctx->start_time;
2019  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2020  start_time += f->start_time;
2021  if (ist->pts >= f->recording_time + start_time) {
2022  close_output_stream(ost);
2023  return;
2024  }
2025  }
2026 
2027  /* force the input stream PTS */
2028  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2029  ost->sync_opts++;
2030 
     /* rescale pts/dts from the input stream timebase into the muxer
      * timebase, shifted so output timestamps start at zero */
2031  if (pkt->pts != AV_NOPTS_VALUE)
2032  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2033  else
2034  opkt.pts = AV_NOPTS_VALUE;
2035 
2036  if (pkt->dts == AV_NOPTS_VALUE)
2037  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2038  else
2039  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2040  opkt.dts -= ost_tb_start_time;
2041 
     /* NOTE(review): listing lines 2043 and 2047 are missing from this
      * extract; presumably the declaration of 'duration' and an argument
      * line of av_rescale_delta() -- confirm against the upstream source. */
2042  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2044  if(!duration)
2045  duration = ist->dec_ctx->frame_size;
2046  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2048  ost->mux_timebase) - ost_tb_start_time;
2049  }
2050 
2051  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2052 
2053  opkt.flags = pkt->flags;
2054  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2055  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2056  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2057  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2058  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2059  ) {
     /* NOTE(review): listing line 2063 (final av_parser_change() argument
      * line) is missing from this extract. */
2060  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2061  &opkt.data, &opkt.size,
2062  pkt->data, pkt->size,
2064  if (ret < 0) {
2065  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2066  av_err2str(ret));
2067  exit_program(1);
2068  }
     /* nonzero return means new data was allocated; wrap it in a buf so
      * the packet owns (and later frees) it */
2069  if (ret) {
2070  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2071  if (!opkt.buf)
2072  exit_program(1);
2073  }
2074  } else {
2075  opkt.data = pkt->data;
2076  opkt.size = pkt->size;
2077  }
2078  av_copy_packet_side_data(&opkt, pkt);
2079 
2080 #if FF_API_LAVF_FMT_RAWPICTURE
2081  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2082  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2083  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2084  /* store AVPicture in AVPacket, as expected by the output format */
2085  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2086  if (ret < 0) {
2087  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2088  av_err2str(ret));
2089  exit_program(1);
2090  }
2091  opkt.data = (uint8_t *)&pict;
2092  opkt.size = sizeof(AVPicture);
2093  opkt.flags |= AV_PKT_FLAG_KEY;
2094  }
2095 #endif
2096 
2097  output_packet(of, &opkt, ost);
2098 }
2099 
/* If the decoder reported no channel layout, try to pick one (subject to the
 * -guess_layout_max channel count limit) and warn about the guess.
 * Returns 1 on success or when a layout was already set, 0 when no layout
 * could be determined.
 * NOTE(review): the signature line (listing 2100) and listing line 2109 are
 * missing from this extract; 2109 presumably assigns the guessed default
 * layout to dec->channel_layout -- confirm against the upstream source. */
2101 {
2102  AVCodecContext *dec = ist->dec_ctx;
2103 
2104  if (!dec->channel_layout) {
2105  char layout_name[256];
2106 
     /* refuse to guess for very wide channel counts */
2107  if (dec->channels > ist->guess_layout_max)
2108  return 0;
2110  if (!dec->channel_layout)
2111  return 0;
2112  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2113  dec->channels, dec->channel_layout);
2114  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2115  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2116  }
2117  return 1;
2118 }
2119 
/* Update the global decode error statistics and, with -xerror, abort the
 * program on decode failures or corrupt decoded frames. */
2120 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2121 {
     /* decode_error_stat[0] counts successes, [1] counts failures */
2122  if (*got_output || ret<0)
2123  decode_error_stat[ret<0] ++;
2124 
2125  if (ret < 0 && exit_on_error)
2126  exit_program(1);
2127 
     /* NOTE(review): listing line 2129 is missing from this extract;
      * presumably the inner condition detecting a corrupt decoded frame
      * that opens the block below -- confirm against the upstream source. */
2128  if (exit_on_error && *got_output && ist) {
2130  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2131  exit_program(1);
2132  }
2133  }
2134 }
2135 
2136 // Filters can be configured only if the formats of all inputs are known.
     /* ifilter_has_all_input_formats() (upstream name; its signature line,
      * 2137, was dropped by the doc-page extraction): returns 1 iff every
      * audio/video input of the filtergraph has a known format (format >= 0),
      * 0 as soon as one is still unset. */
2138 {
2139  int i;
2140  for (i = 0; i < fg->nb_inputs; i++) {
2141  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2142  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2143  return 0;
2144  }
2145  return 1;
2146 }
2147 
/* ifilter_send_frame() (upstream name; its signature line, 2148, was dropped
 * by the doc-page extraction): feed one decoded frame to a filtergraph input.
 * If the frame's parameters differ from what the graph was configured for,
 * the graph is (re)configured; frames arriving before all inputs have known
 * formats are buffered in ifilter->frame_queue. Returns 0 or a negative
 * AVERROR code. */
2149 {
2150  FilterGraph *fg = ifilter->graph;
2151  int need_reinit, ret, i;
2152 
2153  /* determine if the parameters for this input changed */
2154  need_reinit = ifilter->format != frame->format;
      /* hw frames context appearing/disappearing or changing also forces reinit */
2155  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2156  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2157  need_reinit = 1;
2158 
2159  switch (ifilter->ist->st->codecpar->codec_type) {
2160  case AVMEDIA_TYPE_AUDIO:
2161  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2162  ifilter->channels != frame->channels ||
2163  ifilter->channel_layout != frame->channel_layout;
2164  break;
2165  case AVMEDIA_TYPE_VIDEO:
2166  need_reinit |= ifilter->width != frame->width ||
2167  ifilter->height != frame->height;
2168  break;
2169  }
2170 
2171  if (need_reinit) {
      /* record the new parameters on the InputFilter before reconfiguring */
2172  ret = ifilter_parameters_from_frame(ifilter, frame);
2173  if (ret < 0)
2174  return ret;
2175  }
2176 
2177  /* (re)init the graph if possible, otherwise buffer the frame and return */
2178  if (need_reinit || !fg->graph) {
2179  for (i = 0; i < fg->nb_inputs; i++) {
2180  if (!ifilter_has_all_input_formats(fg)) {
      /* cannot configure yet: queue a clone of the frame for later */
2181  AVFrame *tmp = av_frame_clone(frame);
2182  if (!tmp)
2183  return AVERROR(ENOMEM);
2184  av_frame_unref(frame);
2185 
2186  if (!av_fifo_space(ifilter->frame_queue)) {
2187  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2188  if (ret < 0) {
2189  av_frame_free(&tmp);
2190  return ret;
2191  }
2192  }
2193  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2194  return 0;
2195  }
2196  }
2197 
      /* drain already-filtered frames before tearing the graph down */
2198  ret = reap_filters(1);
2199  if (ret < 0 && ret != AVERROR_EOF) {
2200  char errbuf[128];
2201  av_strerror(ret, errbuf, sizeof(errbuf));
2202 
2203  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2204  return ret;
2205  }
2206 
2207  ret = configure_filtergraph(fg);
2208  if (ret < 0) {
2209  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2210  return ret;
2211  }
2212  }
2213 
     /* NOTE(review): line 2214 was dropped by the extraction; upstream it is
      * the call that actually pushes the frame into the buffersrc filter
      * (av_buffersrc_add_frame_flags) whose result is checked below - confirm
      * against the original ffmpeg.c. */
2215  if (ret < 0) {
2216  if (ret != AVERROR_EOF)
2217  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2218  return ret;
2219  }
2220 
2221  return 0;
2222 }
2223 
/* Mark one filtergraph input as finished. If the graph is configured, a NULL
 * frame (EOF) is pushed into it; if it never got configured and ALL of its
 * inputs are now at EOF, the attached output streams are finished directly.
 * Returns 0 or a negative AVERROR code. */
2224 static int ifilter_send_eof(InputFilter *ifilter)
2225 {
2226  int i, j, ret;
2227 
2228  ifilter->eof = 1;
2229 
2230  if (ifilter->filter) {
      /* NOTE(review): line 2231 was dropped by the extraction; upstream it
       * pushes the EOF (NULL frame) into the buffersrc filter and assigns the
       * result to 'ret' checked below - confirm against the original. */
2232  if (ret < 0)
2233  return ret;
2234  } else {
2235  // the filtergraph was never configured
2236  FilterGraph *fg = ifilter->graph;
2237  for (i = 0; i < fg->nb_inputs; i++)
2238  if (!fg->inputs[i]->eof)
2239  break;
2240  if (i == fg->nb_inputs) {
2241  // All the input streams have finished without the filtergraph
2242  // ever being configured.
2243  // Mark the output streams as finished.
2244  for (j = 0; j < fg->nb_outputs; j++)
2245  finish_output_stream(fg->outputs[j]->ost);
2246  }
2247  }
2248 
2249  return 0;
2250 }
2251 
2252 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2253 // There is the following difference: if you got a frame, you must call
2254 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2255 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2256 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2257 {
2258  int ret;
2259 
2260  *got_frame = 0;
2261 
2262  if (pkt) {
2263  ret = avcodec_send_packet(avctx, pkt);
2264  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2265  // decoded frames with avcodec_receive_frame() until done.
2266  if (ret < 0 && ret != AVERROR_EOF)
2267  return ret;
2268  }
2269 
2270  ret = avcodec_receive_frame(avctx, frame);
2271  if (ret < 0 && ret != AVERROR(EAGAIN))
2272  return ret;
2273  if (ret >= 0)
2274  *got_frame = 1;
2275 
2276  return 0;
2277 }
2278 
/* Distribute one decoded frame to every filtergraph input fed by this stream.
 * All but the last filter receive a new reference (ist->filter_frame); the
 * last one consumes decoded_frame itself. Returns 0 on success or the first
 * negative AVERROR encountered (AVERROR_EOF from a filter is ignored). */
2279 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2280 {
2281  int i, ret;
2282  AVFrame *f;
2283 
2284  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2285  for (i = 0; i < ist->nb_filters; i++) {
2286  if (i < ist->nb_filters - 1) {
2287  f = ist->filter_frame;
2288  ret = av_frame_ref(f, decoded_frame);
2289  if (ret < 0)
2290  break;
2291  } else
2292  f = decoded_frame;
2293  ret = ifilter_send_frame(ist->filters[i], f);
2294  if (ret == AVERROR_EOF)
2295  ret = 0; /* ignore */
2296  if (ret < 0) {
      /* NOTE(review): line 2297 (the opening av_log(NULL, AV_LOG_ERROR, of
       * this message) was dropped by the extraction. */
2298  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2299  break;
2300  }
2301  }
2302  return ret;
2303 }
2304 
/* Decode one audio packet (or drain with pkt == NULL), advance the stream's
 * pts/dts estimates, rescale the frame's timestamp to the sample-rate
 * timebase and hand the frame to the filtergraphs. *got_output reports
 * whether a frame was produced; *decode_failed distinguishes decoder errors
 * from post-processing errors. Returns 0/negative AVERROR. */
2305 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2306  int *decode_failed)
2307 {
2308  AVFrame *decoded_frame;
2309  AVCodecContext *avctx = ist->dec_ctx;
2310  int ret, err = 0;
2311  AVRational decoded_frame_tb;
2312 
      /* lazily allocate the per-stream scratch frames */
2313  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2314  return AVERROR(ENOMEM);
2315  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2316  return AVERROR(ENOMEM);
2317  decoded_frame = ist->decoded_frame;
2318 
     /* NOTE(review): line 2319 was dropped by the extraction; upstream it is
      * the update_benchmark(NULL) call that starts the timing window closed
      * by the update_benchmark() below - confirm against the original. */
2320  ret = decode(avctx, decoded_frame, got_output, pkt);
2321  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2322  if (ret < 0)
2323  *decode_failed = 1;
2324 
2325  if (ret >= 0 && avctx->sample_rate <= 0) {
2326  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2327  ret = AVERROR_INVALIDDATA;
2328  }
2329 
2330  if (ret != AVERROR_EOF)
2331  check_decode_result(ist, got_output, ret);
2332 
2333  if (!*got_output || ret < 0)
2334  return ret;
2335 
2336  ist->samples_decoded += decoded_frame->nb_samples;
2337  ist->frames_decoded++;
2338 
2339 #if 1
2340  /* increment next_dts to use for the case where the input stream does not
2341  have timestamps or there are multiple frames in the packet */
2342  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2343  avctx->sample_rate;
2344  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2345  avctx->sample_rate;
2346 #endif
2347 
      /* pick the best available timestamp source and remember its timebase */
2348  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2349  decoded_frame_tb = ist->st->time_base;
2350  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2351  decoded_frame->pts = pkt->pts;
2352  decoded_frame_tb = ist->st->time_base;
2353  }else {
2354  decoded_frame->pts = ist->dts;
2355  decoded_frame_tb = AV_TIME_BASE_Q;
2356  }
      /* rescale to a 1/sample_rate timebase, smoothing rounding drift */
2357  if (decoded_frame->pts != AV_NOPTS_VALUE)
2358  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2359  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2360  (AVRational){1, avctx->sample_rate});
2361  ist->nb_samples = decoded_frame->nb_samples;
2362  err = send_frame_to_filters(ist, decoded_frame);
2363 
2364  av_frame_unref(ist->filter_frame);
2365  av_frame_unref(decoded_frame);
2366  return err < 0 ? err : ret;
2367 }
2368 
/* Decode one video packet (or drain with eof/pkt == NULL), derive a
 * best-effort timestamp for the frame, retrieve hwaccel data if needed and
 * hand the frame to the filtergraphs. *got_output reports whether a frame
 * was produced; *decode_failed distinguishes decoder errors from
 * post-processing errors. Returns 0/negative AVERROR. */
2369 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2370  int *decode_failed)
2371 {
2372  AVFrame *decoded_frame;
2373  int i, ret = 0, err = 0;
2374  int64_t best_effort_timestamp;
2375  int64_t dts = AV_NOPTS_VALUE;
2376  AVPacket avpkt;
2377 
2378  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2379  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2380  // skip the packet.
2381  if (!eof && pkt && pkt->size == 0)
2382  return 0;
2383 
      /* lazily allocate the per-stream scratch frames */
2384  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2385  return AVERROR(ENOMEM);
2386  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2387  return AVERROR(ENOMEM);
2388  decoded_frame = ist->decoded_frame;
2389  if (ist->dts != AV_NOPTS_VALUE)
2390  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2391  if (pkt) {
2392  avpkt = *pkt;
2393  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2394  }
2395 
2396  // The old code used to set dts on the drain packet, which does not work
2397  // with the new API anymore.
2398  if (eof) {
      /* remember the estimated dts so drained frames still get timestamps */
2399  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2400  if (!new)
2401  return AVERROR(ENOMEM);
2402  ist->dts_buffer = new;
2403  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2404  }
2405 
     /* NOTE(review): line 2406 was dropped by the extraction; upstream it is
      * the update_benchmark(NULL) call opening the timing window closed by
      * the update_benchmark() below - confirm against the original. */
2407  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2408  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2409  if (ret < 0)
2410  *decode_failed = 1;
2411 
2412  // The following line may be required in some cases where there is no parser
2413  // or the parser does not has_b_frames correctly
2414  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2415  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2416  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2417  } else
      /* NOTE(review): line 2418 (the opening av_log(..., AV_LOG_WARNING, of
       * this message) was dropped by the extraction. */
2419  "video_delay is larger in decoder than demuxer %d > %d.\n"
2420  "If you want to help, upload a sample "
2421  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2422  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2423  ist->dec_ctx->has_b_frames,
2424  ist->st->codecpar->video_delay);
2425  }
2426 
2427  if (ret != AVERROR_EOF)
2428  check_decode_result(ist, got_output, ret);
2429 
      /* diagnostic only: a mismatch here is logged, not treated as an error */
2430  if (*got_output && ret >= 0) {
2431  if (ist->dec_ctx->width != decoded_frame->width ||
2432  ist->dec_ctx->height != decoded_frame->height ||
2433  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2434  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2435  decoded_frame->width,
2436  decoded_frame->height,
2437  decoded_frame->format,
2438  ist->dec_ctx->width,
2439  ist->dec_ctx->height,
2440  ist->dec_ctx->pix_fmt);
2441  }
2442  }
2443 
2444  if (!*got_output || ret < 0)
2445  return ret;
2446 
2447  if(ist->top_field_first>=0)
2448  decoded_frame->top_field_first = ist->top_field_first;
2449 
2450  ist->frames_decoded++;
2451 
      /* download the frame from hw memory when a retrieval callback is set */
2452  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2453  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2454  if (err < 0)
2455  goto fail;
2456  }
2457  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2458 
2459  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2460 
      /* forced cfr input (-r before -i): synthesize timestamps */
2461  if (ist->framerate.num)
2462  best_effort_timestamp = ist->cfr_next_pts++;
2463 
      /* while draining, fall back to the buffered dts estimates (FIFO order) */
2464  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2465  best_effort_timestamp = ist->dts_buffer[0];
2466 
2467  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2468  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2469  ist->nb_dts_buffer--;
2470  }
2471 
2472  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2473  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2474 
2475  if (ts != AV_NOPTS_VALUE)
2476  ist->next_pts = ist->pts = ts;
2477  }
2478 
2479  if (debug_ts) {
2480  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2481  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2482  ist->st->index, av_ts2str(decoded_frame->pts),
2483  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2484  best_effort_timestamp,
2485  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2486  decoded_frame->key_frame, decoded_frame->pict_type,
2487  ist->st->time_base.num, ist->st->time_base.den);
2488  }
2489 
2490  if (ist->st->sample_aspect_ratio.num)
2491  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2492 
2493  err = send_frame_to_filters(ist, decoded_frame);
2494 
2495 fail:
     /* NOTE(review): line 2496 was dropped by the extraction; upstream it
      * unrefs ist->filter_frame, mirroring decode_audio() - confirm. */
2497  av_frame_unref(decoded_frame);
2498  return err < 0 ? err : ret;
2499 }
2500 
/* Decode one subtitle packet, optionally clamp its duration against the next
 * subtitle (-fix_sub_duration), render it for sub2video filter inputs (or
 * queue it until sub2video is ready), and pass it to every subtitle encoder
 * attached to this input. Returns the decoder's return value. */
2501 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2502  int *decode_failed)
2503 {
2504  AVSubtitle subtitle;
2505  int free_sub = 1;
2506  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2507  &subtitle, got_output, pkt);
2508 
2509  check_decode_result(NULL, got_output, ret);
2510 
2511  if (ret < 0 || !*got_output) {
2512  *decode_failed = 1;
      /* an empty packet is the flush signal: propagate EOF to sub2video */
2513  if (!pkt->size)
2514  sub2video_flush(ist);
2515  return ret;
2516  }
2517 
      /* -fix_sub_duration: delay each subtitle by one and end it when the next
       * one starts, so overlapping/overlong durations get corrected */
2518  if (ist->fix_sub_duration) {
2519  int end = 1;
2520  if (ist->prev_sub.got_output) {
2521  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2522  1000, AV_TIME_BASE);
2523  if (end < ist->prev_sub.subtitle.end_display_time) {
2524  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2525  "Subtitle duration reduced from %"PRId32" to %d%s\n",
     /* NOTE(review): lines 2526 and 2528 were dropped by the extraction;
      * upstream they supply the old/new duration arguments to this message
      * and assign the clamped end_display_time - confirm against the
      * original ffmpeg.c. */
2527  end <= 0 ? ", dropping it" : "");
2529  }
2530  }
      /* swap current and previous subtitle state: we emit the PREVIOUS one */
2531  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2532  FFSWAP(int, ret, ist->prev_sub.ret);
2533  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2534  if (end <= 0)
2535  goto out;
2536  }
2537 
2538  if (!*got_output)
2539  return ret;
2540 
2541  if (ist->sub2video.frame) {
2542  sub2video_update(ist, &subtitle);
2543  } else if (ist->nb_filters) {
      /* filters exist but sub2video is not initialized yet: queue the sub,
       * ownership moves to the queue (free_sub = 0) */
2544  if (!ist->sub2video.sub_queue)
2545  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2546  if (!ist->sub2video.sub_queue)
2547  exit_program(1);
2548  if (!av_fifo_space(ist->sub2video.sub_queue)) {
     /* NOTE(review): line 2549 was dropped by the extraction; upstream it
      * grows the FIFO (av_fifo_realloc2) and assigns 'ret' checked below. */
2550  if (ret < 0)
2551  exit_program(1);
2552  }
2553  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2554  free_sub = 0;
2555  }
2556 
2557  if (!subtitle.num_rects)
2558  goto out;
2559 
2560  ist->frames_decoded++;
2561 
2562  for (i = 0; i < nb_output_streams; i++) {
2563  OutputStream *ost = output_streams[i];
2564 
2565  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2566  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2567  continue;
2568 
2569  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2570  }
2571 
2572 out:
2573  if (free_sub)
2574  avsubtitle_free(&subtitle);
2575  return ret;
2576 }
2577 
/* send_filter_eof() (upstream name; its signature line, 2578, was dropped by
 * the doc-page extraction): propagate EOF to every filtergraph input fed by
 * this input stream. Returns 0 or the first negative AVERROR. */
2579 {
2580  int i, ret;
2581  for (i = 0; i < ist->nb_filters; i++) {
2582  ret = ifilter_send_eof(ist->filters[i]);
2583  if (ret < 0)
2584  return ret;
2585  }
2586  return 0;
2587 }
2588 
2589 /* pkt = NULL means EOF (needed to flush decoder buffers) */
     /* Main per-packet dispatcher: maintains the stream's pts/dts estimates,
      * runs the decode loop (audio/video/subtitle) when the stream is being
      * decoded, sends filter EOFs after a flush (unless no_eof, used when
      * looping input), and performs streamcopy for non-decoded streams.
      * Returns 0 once EOF has been fully processed, non-zero otherwise. */
2590 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2591 {
2592  int ret = 0, i;
2593  int repeating = 0;
2594  int eof_reached = 0;
2595 
2596  AVPacket avpkt;
2597  if (!ist->saw_first_ts) {
      /* initial dts: back up by the decoder delay (B-frames) when possible */
2598  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2599  ist->pts = 0;
2600  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2601  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2602  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2603  }
2604  ist->saw_first_ts = 1;
2605  }
2606 
2607  if (ist->next_dts == AV_NOPTS_VALUE)
2608  ist->next_dts = ist->dts;
2609  if (ist->next_pts == AV_NOPTS_VALUE)
2610  ist->next_pts = ist->pts;
2611 
2612  if (!pkt) {
2613  /* EOF handling */
2614  av_init_packet(&avpkt);
2615  avpkt.data = NULL;
2616  avpkt.size = 0;
2617  } else {
2618  avpkt = *pkt;
2619  }
2620 
2621  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2622  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2623  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2624  ist->next_pts = ist->pts = ist->dts;
2625  }
2626 
2627  // while we have more to decode or while the decoder did output something on EOF
2628  while (ist->decoding_needed) {
2629  int64_t duration = 0;
2630  int got_output = 0;
2631  int decode_failed = 0;
2632 
2633  ist->pts = ist->next_pts;
2634  ist->dts = ist->next_dts;
2635 
2636  switch (ist->dec_ctx->codec_type) {
2637  case AVMEDIA_TYPE_AUDIO:
2638  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2639  &decode_failed);
2640  break;
2641  case AVMEDIA_TYPE_VIDEO:
2642  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2643  &decode_failed);
      /* estimate frame duration: packet duration, else codec framerate */
2644  if (!repeating || !pkt || got_output) {
2645  if (pkt && pkt->duration) {
2646  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2647  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
     /* NOTE(review): lines 2648 and 2651 were dropped by the extraction;
      * upstream they compute 'ticks' from the parser's repeat_pict (or
      * ticks_per_frame) and divide by framerate.num * ticks_per_frame -
      * confirm against the original ffmpeg.c. */
2649  duration = ((int64_t)AV_TIME_BASE *
2650  ist->dec_ctx->framerate.den * ticks) /
2652  }
2653 
2654  if(ist->dts != AV_NOPTS_VALUE && duration) {
2655  ist->next_dts += duration;
2656  }else
2657  ist->next_dts = AV_NOPTS_VALUE;
2658  }
2659 
2660  if (got_output)
2661  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2662  break;
2663  case AVMEDIA_TYPE_SUBTITLE:
2664  if (repeating)
2665  break;
2666  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2667  if (!pkt && ret >= 0)
2668  ret = AVERROR_EOF;
2669  break;
2670  default:
2671  return -1;
2672  }
2673 
2674  if (ret == AVERROR_EOF) {
2675  eof_reached = 1;
2676  break;
2677  }
2678 
2679  if (ret < 0) {
2680  if (decode_failed) {
2681  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2682  ist->file_index, ist->st->index, av_err2str(ret));
2683  } else {
2684  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2685  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2686  }
      /* decode errors are tolerated unless -xerror; internal errors are not */
2687  if (!decode_failed || exit_on_error)
2688  exit_program(1);
2689  break;
2690  }
2691 
2692  if (got_output)
2693  ist->got_output = 1;
2694 
2695  if (!got_output)
2696  break;
2697 
2698  // During draining, we might get multiple output frames in this loop.
2699  // ffmpeg.c does not drain the filter chain on configuration changes,
2700  // which means if we send multiple frames at once to the filters, and
2701  // one of those frames changes configuration, the buffered frames will
2702  // be lost. This can upset certain FATE tests.
2703  // Decode only 1 frame per call on EOF to appease these FATE tests.
2704  // The ideal solution would be to rewrite decoding to use the new
2705  // decoding API in a better way.
2706  if (!pkt)
2707  break;
2708 
2709  repeating = 1;
2710  }
2711 
2712  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2713  /* except when looping we need to flush but not to send an EOF */
2714  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2715  int ret = send_filter_eof(ist);
2716  if (ret < 0) {
2717  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2718  exit_program(1);
2719  }
2720  }
2721 
2722  /* handle stream copy */
2723  if (!ist->decoding_needed) {
2724  ist->dts = ist->next_dts;
2725  switch (ist->dec_ctx->codec_type) {
2726  case AVMEDIA_TYPE_AUDIO:
2727  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2728  ist->dec_ctx->sample_rate;
2729  break;
2730  case AVMEDIA_TYPE_VIDEO:
2731  if (ist->framerate.num) {
2732  // TODO: Remove work-around for c99-to-c89 issue 7
2733  AVRational time_base_q = AV_TIME_BASE_Q;
2734  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2735  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2736  } else if (pkt->duration) {
2737  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2738  } else if(ist->dec_ctx->framerate.num != 0) {
2739  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2740  ist->next_dts += ((int64_t)AV_TIME_BASE *
2741  ist->dec_ctx->framerate.den * ticks) /
     /* NOTE(review): line 2742 was dropped by the extraction; upstream it is
      * the divisor (framerate.num * ticks_per_frame) of this expression. */
2743  }
2744  break;
2745  }
2746  ist->pts = ist->dts;
2747  ist->next_pts = ist->next_dts;
2748  }
2749  for (i = 0; pkt && i < nb_output_streams; i++) {
2750  OutputStream *ost = output_streams[i];
2751 
2752  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2753  continue;
2754 
2755  do_streamcopy(ist, ost, pkt);
2756  }
2757 
2758  return !eof_reached;
2759 }
2760 
/* Generate an SDP description covering every "rtp" output and either print it
 * to stdout or write it to -sdp_file. Does nothing until all output files
 * have written their headers. */
2761 static void print_sdp(void)
2762 {
2763  char sdp[16384];
2764  int i;
2765  int j;
2766  AVIOContext *sdp_pb;
2767  AVFormatContext **avc;
2768 
      /* wait until every muxer is initialized so the SDP is complete */
2769  for (i = 0; i < nb_output_files; i++) {
2770  if (!output_files[i]->header_written)
2771  return;
2772  }
2773 
2774  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2775  if (!avc)
2776  exit_program(1);
      /* collect only the RTP muxer contexts; j counts them */
2777  for (i = 0, j = 0; i < nb_output_files; i++) {
2778  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2779  avc[j] = output_files[i]->ctx;
2780  j++;
2781  }
2782  }
2783 
2784  if (!j)
2785  goto fail;
2786 
2787  av_sdp_create(avc, j, sdp, sizeof(sdp));
2788 
2789  if (!sdp_filename) {
2790  printf("SDP:\n%s\n", sdp);
2791  fflush(stdout);
2792  } else {
2793  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2794  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2795  } else {
2796  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2797  avio_closep(&sdp_pb);
     /* NOTE(review): line 2798 was dropped by the extraction; upstream it
      * frees sdp_filename so the SDP is only written once - confirm. */
2799  }
2800  }
2801 
2802 fail:
2803  av_freep(&avc);
2804 }
2805 
/* get_hwaccel() (upstream name; its signature line, 2806, was dropped by the
 * doc-page extraction): linear search of the global hwaccels[] table for the
 * entry matching the given pixel format; NULL when none matches. */
2807 {
2808  int i;
2809  for (i = 0; hwaccels[i].name; i++)
2810  if (hwaccels[i].pix_fmt == pix_fmt)
2811  return &hwaccels[i];
2812  return NULL;
2813 }
2814 
/* get_format() (upstream name; its signature line, 2815, was dropped by the
 * doc-page extraction): AVCodecContext.get_format callback. Walks the
 * decoder's proposed pixel formats, tries to initialize a matching, allowed
 * hwaccel for each hardware format, and falls back to the first software
 * format when the hw list is exhausted. */
2816 {
2817  InputStream *ist = s->opaque;
2818  const enum AVPixelFormat *p;
2819  int ret;
2820 
2821  for (p = pix_fmts; *p != -1; p++) {
     /* NOTE(review): line 2822 was dropped by the extraction; upstream it
      * fetches the AVPixFmtDescriptor ('desc') used just below - confirm. */
2823  const HWAccel *hwaccel;
2824 
      /* the list is hw formats first, then sw: stop at the first sw format */
2825  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2826  break;
2827 
2828  hwaccel = get_hwaccel(*p);
      /* skip hwaccels that are unknown, already superseded, or not requested */
2829  if (!hwaccel ||
2830  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2831  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2832  continue;
2833 
2834  ret = hwaccel->init(s);
2835  if (ret < 0) {
      /* explicit request that cannot be satisfied is fatal for this stream */
2836  if (ist->hwaccel_id == hwaccel->id) {
     /* NOTE(review): line 2837 (the opening av_log(NULL, AV_LOG_FATAL, of
      * this message) was dropped by the extraction. */
2838  "%s hwaccel requested for input stream #%d:%d, "
2839  "but cannot be initialized.\n", hwaccel->name,
2840  ist->file_index, ist->st->index);
2841  return AV_PIX_FMT_NONE;
2842  }
2843  continue;
2844  }
2845 
2846  if (ist->hw_frames_ctx) {
     /* NOTE(review): line 2847 was dropped by the extraction; upstream it
      * assigns s->hw_frames_ctx a new reference to ist->hw_frames_ctx,
      * matching the NULL check below - confirm against the original. */
2848  if (!s->hw_frames_ctx)
2849  return AV_PIX_FMT_NONE;
2850  }
2851 
2852  ist->active_hwaccel_id = hwaccel->id;
2853  ist->hwaccel_pix_fmt = *p;
2854  break;
2855  }
2856 
2857  return *p;
2858 }
2859 
/* get_buffer() (upstream name; its signature line, 2860, was dropped by the
 * doc-page extraction): AVCodecContext.get_buffer2 callback. Delegates to
 * the active hwaccel's buffer allocator for its pixel format, otherwise uses
 * the libavcodec default. */
2861 {
2862  InputStream *ist = s->opaque;
2863 
2864  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2865  return ist->hwaccel_get_buffer(s, frame, flags);
2866 
2867  return avcodec_default_get_buffer2(s, frame, flags);
2868 }
2869 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, set decoder options, and call
 * avcodec_open2(). On failure, writes a human-readable message into 'error'
 * and returns a negative AVERROR. Always resets next_pts/next_dts. */
2870 static int init_input_stream(int ist_index, char *error, int error_len)
2871 {
2872  int ret;
2873  InputStream *ist = input_streams[ist_index];
2874 
2875  if (ist->decoding_needed) {
2876  AVCodec *codec = ist->dec;
2877  if (!codec) {
2878  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2879  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2880  return AVERROR(EINVAL);
2881  }
2882 
      /* hook up per-stream callbacks (hwaccel format/buffer selection) */
2883  ist->dec_ctx->opaque = ist;
2884  ist->dec_ctx->get_format = get_format;
2885  ist->dec_ctx->get_buffer2 = get_buffer;
2886  ist->dec_ctx->thread_safe_callbacks = 1;
2887 
2888  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2889  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2890  (ist->decoding_needed & DECODING_FOR_OST)) {
2891  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
     /* NOTE(review): line 2892 was dropped by the extraction; upstream it
      * gates the warning below on (decoding_needed & DECODING_FOR_FILTER). */
2893  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2894  }
2895 
2896  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2897 
2898  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2899  * audio, and video decoders such as cuvid or mediacodec */
     /* NOTE(review): line 2900 was dropped by the extraction; per the comment
      * above, upstream it sets the packet timebase on the decoder context
      * (av_codec_set_pkt_timebase) - confirm against the original. */
2901 
2902  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2903  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2904  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2905  if (ret == AVERROR_EXPERIMENTAL)
2906  abort_codec_experimental(codec, 0);
2907 
2908  snprintf(error, error_len,
2909  "Error while opening decoder for input stream "
2910  "#%d:%d : %s",
2911  ist->file_index, ist->st->index, av_err2str(ret));
2912  return ret;
2913  }
     /* NOTE(review): line 2914 was dropped by the extraction; upstream it
      * calls assert_avoptions(ist->decoder_opts) to flag unused options. */
2915  }
2916 
2917  ist->next_pts = AV_NOPTS_VALUE;
2918  ist->next_dts = AV_NOPTS_VALUE;
2919 
2920  return 0;
2921 }
2922 
/* get_input_stream() (upstream name; its signature line, 2923, was dropped by
 * the doc-page extraction): the input stream an output stream is fed from,
 * or NULL for outputs without a direct source (e.g. filter-only outputs). */
2924 {
2925  if (ost->source_index >= 0)
2926  return input_streams[ost->source_index];
2927  return NULL;
2928 }
2929 
/* qsort()-style three-way comparator for int64_t values: returns a negative,
 * zero or positive result when *a is respectively less than, equal to or
 * greater than *b. The expression is the expansion of FFDIFFSIGN and avoids
 * the overflow a naive subtraction would risk. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2934 
2935 /* open the muxer when all the streams are initialized */
     /* Returns 0 (and does nothing) while any stream of the file is still
      * uninitialized; once all are ready it writes the muxer header, prints
      * the SDP if requested, and flushes each stream's muxing queue. Returns
      * a negative AVERROR if avformat_write_header() fails. */
2936 static int check_init_output_file(OutputFile *of, int file_index)
2937 {
2938  int ret, i;
2939 
2940  for (i = 0; i < of->ctx->nb_streams; i++) {
2941  OutputStream *ost = output_streams[of->ost_index + i];
2942  if (!ost->initialized)
2943  return 0;
2944  }
2945 
2946  of->ctx->interrupt_callback = int_cb;
2947 
2948  ret = avformat_write_header(of->ctx, &of->opts);
2949  if (ret < 0) {
     /* NOTE(review): line 2950 (the opening av_log(NULL, AV_LOG_ERROR, of
      * this message) was dropped by the extraction. */
2951  "Could not write header for output file #%d "
2952  "(incorrect codec parameters ?): %s\n",
2953  file_index, av_err2str(ret));
2954  return ret;
2955  }
2956  //assert_avoptions(of->opts);
2957  of->header_written = 1;
2958 
2959  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2960 
2961  if (sdp_filename || want_sdp)
2962  print_sdp();
2963 
2964  /* flush the muxing queues */
2965  for (i = 0; i < of->ctx->nb_streams; i++) {
2966  OutputStream *ost = output_streams[of->ost_index + i];
2967 
2968  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2969  if (!av_fifo_size(ost->muxing_queue))
2970  ost->mux_timebase = ost->st->time_base;
2971 
2972  while (av_fifo_size(ost->muxing_queue)) {
2973  AVPacket pkt;
2974  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2975  write_packet(of, &pkt, ost);
2976  }
2977  }
2978 
2979  return 0;
2980 }
2981 
2983 {
2984  AVBSFContext *ctx;
2985  int i, ret;
2986 
2987  if (!ost->nb_bitstream_filters)
2988  return 0;
2989 
2990  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2991  ctx = ost->bsf_ctx[i];
2992 
2993  ret = avcodec_parameters_copy(ctx->par_in,
2994  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2995  if (ret < 0)
2996  return ret;
2997 
2998  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2999 
3000  ret = av_bsf_init(ctx);
3001  if (ret < 0) {
3002  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3003  ost->bsf_ctx[i]->filter->name);
3004  return ret;
3005  }
3006  }
3007 
3008  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3009  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3010  if (ret < 0)
3011  return ret;
3012 
3013  ost->st->time_base = ctx->time_base_out;
3014 
3015  return 0;
3016 }
3017 
/* init_output_stream_streamcopy() (upstream name; its signature line, 3018,
 * was dropped by the doc-page extraction): set up an output stream in
 * stream-copy mode - copy codec parameters, codec tag, framerate, timebase,
 * duration, disposition and side data from the source input stream, then
 * apply per-codec-type fixups. Returns 0 or a negative AVERROR. */
3019 {
3020  OutputFile *of = output_files[ost->file_index];
3021  InputStream *ist = get_input_stream(ost);
3022  AVCodecParameters *par_dst = ost->st->codecpar;
3023  AVCodecParameters *par_src = ost->ref_par;
3024  AVRational sar;
3025  int i, ret;
3026  uint32_t codec_tag = par_dst->codec_tag;
3027 
3028  av_assert0(ist && !ost->filter);
3029 
3030  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3031  if (ret >= 0)
3032  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3033  if (ret < 0) {
     /* NOTE(review): line 3034 (the opening av_log(NULL, AV_LOG_FATAL, of
      * this message) was dropped by the extraction. */
3035  "Error setting up codec context options.\n");
3036  return ret;
3037  }
     /* NOTE(review): line 3038 was dropped by the extraction; upstream it
      * extracts the parameters back out of enc_ctx into par_src
      * (avcodec_parameters_from_context) - confirm against the original. */
3039 
      /* keep the source codec tag only when the container can represent it */
3040  if (!codec_tag) {
3041  unsigned int codec_tag_tmp;
3042  if (!of->ctx->oformat->codec_tag ||
3043  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3044  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3045  codec_tag = par_src->codec_tag;
3046  }
3047 
3048  ret = avcodec_parameters_copy(par_dst, par_src);
3049  if (ret < 0)
3050  return ret;
3051 
3052  par_dst->codec_tag = codec_tag;
3053 
3054  if (!ost->frame_rate.num)
3055  ost->frame_rate = ist->framerate;
3056  ost->st->avg_frame_rate = ost->frame_rate;
3057 
     /* NOTE(review): line 3058 was dropped by the extraction; upstream it
      * transfers internal stream timing info from input to output
      * (avformat_transfer_internal_stream_timing_info) and sets 'ret'. */
3059  if (ret < 0)
3060  return ret;
3061 
3062  // copy timebase while removing common factors
3063  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
     /* NOTE(review): line 3064 was dropped by the extraction; upstream it
      * derives ost->st->time_base from the codec timebase - confirm. */
3065 
3066  // copy estimated duration as a hint to the muxer
3067  if (ost->st->duration <= 0 && ist->st->duration > 0)
3068  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3069 
3070  // copy disposition
3071  ost->st->disposition = ist->st->disposition;
3072 
3073  if (ist->st->nb_side_data) {
3074  for (i = 0; i < ist->st->nb_side_data; i++) {
3075  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3076  uint8_t *dst_data;
3077 
3078  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3079  if (!dst_data)
3080  return AVERROR(ENOMEM);
3081  memcpy(dst_data, sd_src->data, sd_src->size);
3082  }
3083  }
3084 
3085  if (ost->rotate_overridden) {
     /* NOTE(review): line 3086 was dropped by the extraction; upstream it
      * allocates display-matrix side data ('sd') on the output stream whose
      * size argument continues on the next line - confirm. */
3087  sizeof(int32_t) * 9);
3088  if (sd)
3089  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3090  }
3091 
3092  ost->parser = av_parser_init(par_dst->codec_id);
3093  ost->parser_avctx = avcodec_alloc_context3(NULL);
3094  if (!ost->parser_avctx)
3095  return AVERROR(ENOMEM);
3096 
3097  switch (par_dst->codec_type) {
3098  case AVMEDIA_TYPE_AUDIO:
3099  if (audio_volume != 256) {
3100  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3101  exit_program(1);
3102  }
      /* MP3/AC3 block_align is meaningless after copy; reset it */
3103  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3104  par_dst->block_align= 0;
3105  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3106  par_dst->block_align= 0;
3107  break;
3108  case AVMEDIA_TYPE_VIDEO:
3109  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3110  sar =
3111  av_mul_q(ost->frame_aspect_ratio,
3112  (AVRational){ par_dst->height, par_dst->width });
3113  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3114  "with stream copy may produce invalid files\n");
3115  }
3116  else if (ist->st->sample_aspect_ratio.num)
3117  sar = ist->st->sample_aspect_ratio;
3118  else
3119  sar = par_src->sample_aspect_ratio;
3120  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3121  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3122  ost->st->r_frame_rate = ist->st->r_frame_rate;
3123  break;
3124  }
3125 
3126  ost->mux_timebase = ist->st->time_base;
3127 
3128  return 0;
3129 }
3130 
/* set_encoder_id() (upstream name; its signature line, 3131, was dropped by
 * the doc-page extraction): write an "encoder" metadata tag for the output
 * stream ("Lavc <codec>", with the full LIBAVCODEC_IDENT unless bitexact
 * output was requested via format or codec flags). Does nothing when the
 * user already set an encoder tag. */
3132 {
3133  AVDictionaryEntry *e;
3134 
3135  uint8_t *encoder_string;
3136  int encoder_string_len;
3137  int format_flags = 0;
3138  int codec_flags = 0;
3139 
3140  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3141  return;
3142 
      /* evaluate -fflags / -flags strings to detect +bitexact */
3143  e = av_dict_get(of->opts, "fflags", NULL, 0);
3144  if (e) {
3145  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3146  if (!o)
3147  return;
3148  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3149  }
3150  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3151  if (e) {
3152  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3153  if (!o)
3154  return;
3155  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3156  }
3157 
3158  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3159  encoder_string = av_mallocz(encoder_string_len);
3160  if (!encoder_string)
3161  exit_program(1);
3162 
3163  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3164  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3165  else
3166  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3167  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3168  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
     /* NOTE(review): line 3169 was dropped by the extraction; upstream it is
      * the final flags argument (AV_DICT_DONT_STRDUP_VAL, which hands
      * ownership of encoder_string to the dict) - confirm. */
3170 }
3171 
3172 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3173  AVCodecContext *avctx)
3174 {
3175  char *p;
3176  int n = 1, i, size, index = 0;
3177  int64_t t, *pts;
3178 
3179  for (p = kf; *p; p++)
3180  if (*p == ',')
3181  n++;
3182  size = n;
3183  pts = av_malloc_array(size, sizeof(*pts));
3184  if (!pts) {
3185  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3186  exit_program(1);
3187  }
3188 
3189  p = kf;
3190  for (i = 0; i < n; i++) {
3191  char *next = strchr(p, ',');
3192 
3193  if (next)
3194  *next++ = 0;
3195 
3196  if (!memcmp(p, "chapters", 8)) {
3197 
3198  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3199  int j;
3200 
3201  if (avf->nb_chapters > INT_MAX - size ||
3202  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3203  sizeof(*pts)))) {
3205  "Could not allocate forced key frames array.\n");
3206  exit_program(1);
3207  }
3208  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3209  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3210 
3211  for (j = 0; j < avf->nb_chapters; j++) {
3212  AVChapter *c = avf->chapters[j];
3213  av_assert1(index < size);
3214  pts[index++] = av_rescale_q(c->start, c->time_base,
3215  avctx->time_base) + t;
3216  }
3217 
3218  } else {
3219 
3220  t = parse_time_or_die("force_key_frames", p, 1);
3221  av_assert1(index < size);
3222  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3223 
3224  }
3225 
3226  p = next;
3227  }
3228 
3229  av_assert0(index == size);
3230  qsort(pts, size, sizeof(*pts), compare_int64);
3231  ost->forced_kf_count = size;
3232  ost->forced_kf_pts = pts;
3233 }
3234 
{
    /* Configure the encoder context of an output stream before it is opened:
     * frame rate selection, time base, dimensions/SAR, pixel/sample format
     * propagation from the decoder, and forced-key-frame setup.
     * NOTE(review): the function signature and several interior lines are
     * missing from this rendering of the source — confirm against upstream
     * before relying on syntactic completeness of this block. */
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* inherit disposition and chroma location from the input stream */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no direct input stream: check whether another output stream of the
         * same media type exists in this file */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
            /* NOTE(review): condition continuation and body are missing from
             * this rendering (presumably setting the default disposition). */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* frame rate priority: -r option, buffersink rate (line missing),
         * input -r, input r_frame_rate, then a 25fps fallback */
        if (!ost->frame_rate.num)
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log(...) call head is missing here. */
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        if (ost->enc->supported_framerates && !ost->force_fps) {
            /* snap to the nearest rate the encoder supports */
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* cap raw-sample depth at what the chosen sample format can carry */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
        break;
    case AVMEDIA_TYPE_VIDEO:
        enc_ctx->time_base = av_inv_q(ost->frame_rate);
        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        /* NOTE(review): fallback time-base line missing here. */
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            /* NOTE(review): second half of this condition is missing. */
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* rescale any pre-parsed forced key frames to the encoder time base */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 enc_ctx->time_base);

        /* geometry and SAR come from the filter graph's sink */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* NOTE(review): the else-branch expression is missing here. */
        if (!strncmp(ost->enc->name, "libx264", 7) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            /* NOTE(review): condition tail and av_log head missing here. */
                   "No pixel format specified, %s for H.264 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
        if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            /* NOTE(review): condition tail and av_log head missing here. */
                   "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* NOTE(review): body missing in this rendering. */
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the av_expr_parse call is missing here. */
                if (ret < 0) {
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): parse_forced_key_frames call missing here. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* fall back to the input video geometry for bitmap subtitles */
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3396 
/* Fully initialize one output stream: run the encode or streamcopy setup,
 * open the encoder, copy side data and hinting fields to the muxer stream,
 * apply user-requested dispositions, set up bitstream filters, and finally
 * try to write the output file header.
 * Returns 0 on success, a negative AVERROR on failure (an error description
 * is written into `error`).
 * NOTE(review): several interior lines are missing from this rendering of
 * the source (marked below) — confirm against upstream. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the subtitle_header allocation line is missing here. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
        }
        /* default to automatic thread count unless the user chose one */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate of 128k for encoders without own defaults */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* pass hardware frames context from the filter graph to the encoder */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            /* NOTE(review): condition tail and hw_frames_ctx ref line missing. */
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* NOTE(review): av_buffersink_set_frame_size call head missing. */
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): avcodec_parameters_from_context call missing here. */
        if (ret < 0) {
            /* NOTE(review): av_log call head missing here. */
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* propagate encoder-produced global side data to the muxer stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* autorotation is applied by filters, so neutralize the
                 * display matrix to avoid double rotation downstream */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         * NOTE(review): a line of this comment is missing in this rendering.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        /* throwaway AVClass so av_opt_eval_flags can parse the flag string */
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* write the header for this output file if all its streams are ready */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3571 
/* Warn once per newly appearing stream index: packets belonging to streams
 * that show up after the initial header probe are not processed, so the user
 * is informed the first time such a packet is seen. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* already warned about this (or a later) stream index */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): the media-type argument line is missing from this
            * rendering of the source — confirm against upstream. */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3586 
/* One-time setup before the main transcode loop: resolve filtergraph output
 * sources, initialize input streams and non-filtered output streams, discard
 * unused programs, write headers of stream-less outputs, then print the
 * stream mapping. Returns 0 on success or a negative AVERROR. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* associate each complex-filtergraph output with the input stream that
     * feeds its (single-input) graph, for mapping/reporting purposes */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* on failure, close any encoders before reporting */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* keep the program if at least one of its streams is used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            /* print "native" when the codec implementation name matches the
             * generic codec name */
            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a line is missing here in this rendering, presumably the
     * store signalling that transcode init completed — confirm upstream. */

    return 0;
}
3758 
3759 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3760 static int need_output(void)
3761 {
3762  int i;
3763 
3764  for (i = 0; i < nb_output_streams; i++) {
3765  OutputStream *ost = output_streams[i];
3766  OutputFile *of = output_files[ost->file_index];
3767  AVFormatContext *os = output_files[ost->file_index]->ctx;
3768 
3769  if (ost->finished ||
3770  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3771  continue;
3772  if (ost->frame_number >= ost->max_frames) {
3773  int j;
3774  for (j = 0; j < of->ctx->nb_streams; j++)
3775  close_output_stream(output_streams[of->ost_index + j]);
3776  continue;
3777  }
3778 
3779  return 1;
3780  }
3781 
3782  return 0;
3783 }
3784 
3785 /**
3786  * Select the output stream to process.
3787  *
3788  * @return selected output stream, or NULL if none available
3789  */
{
    /* Pick the output stream with the smallest current dts (converted to a
     * common microsecond base) among those not finished; streams that are not
     * yet initialized get priority so their setup can proceed.
     * NOTE(review): the function signature line is missing from this
     * rendering of the source. */
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* streams with no dts yet sort first (INT64_MIN) */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        if (!ost->initialized && !ost->inputs_done)
            return ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            /* a temporarily unavailable stream blocks selection this round */
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3814 
/* Enable (on != 0) or disable terminal echo on stdin; a silent no-op when
 * termios is unavailable or stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3826 
{
    /* Poll for interactive key presses (at most every 100ms) and act on them:
     * quit, verbosity, QP histogram, packet dumping, filter commands, debug
     * modes and help. Returns 0, or AVERROR_EXIT to stop transcoding.
     * NOTE(review): the function signature line is missing from this
     * rendering of the source. */
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h'){
        /* cycle: pkt dump off -> pkt dump -> pkt+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): a line (presumably logging the new dump state) is
         * missing from this rendering. */
    }
    if (key == 'c' || key == 'C'){
        /* read a filter command line from the tty and dispatch it:
         * 'c' sends to the first matching filter, 'C' to all */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* command scheduled for a future timestamp */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log call head is missing here. */
                "Parse error, at least 3 arguments were expected, "
                "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': cycle to the next debug bit, skipping modes known to crash */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read a numeric debug value from the tty */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* apply the chosen debug value to all decoder and encoder contexts */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
3939 
3940 #if HAVE_PTHREADS
/* Per-input-file reader thread: demux packets from the file and push them
 * into the file's thread message queue for the main thread to consume.
 * On read error/EOF the error is propagated to the receiving side and the
 * thread exits. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* start non-blocking if requested; may fall back to blocking below */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* propagate EOF/error to the consumer and stop */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: retry once, blocking */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* NOTE(review): the av_log call head is missing here in this
             * rendering of the source. */
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
3981 
/* Shut down all input reader threads: drain and unref any queued packets,
 * join each thread and free its message queue. */
static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f || !f->in_thread_queue)
            continue;
        /* NOTE(review): a line is missing here in this rendering, presumably
         * signalling EOF to the sending side so the thread stops. */
        /* drain packets still sitting in the queue so nothing leaks */
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}
4001 
4002 static int init_input_threads(void)
4003 {
4004  int i, ret;
4005 
4006  if (nb_input_files == 1)
4007  return 0;
4008 
4009  for (i = 0; i < nb_input_files; i++) {
4010  InputFile *f = input_files[i];
4011 
4012  if (f->ctx->pb ? !f->ctx->pb->seekable :
4013  strcmp(f->ctx->iformat->name, "lavfi"))
4014  f->non_blocking = 1;
4015  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4016  f->thread_queue_size, sizeof(AVPacket));
4017  if (ret < 0)
4018  return ret;
4019 
4020  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4021  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4022  av_thread_message_queue_free(&f->in_thread_queue);
4023  return AVERROR(ret);
4024  }
4025  }
4026  return 0;
4027 }
4028 
/* Pull the next demuxed packet from this file's reader-thread queue. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
    /* NOTE(review): the flag-argument line (non-blocking vs. blocking recv)
     * is missing from this rendering of the source. */
}
4035 #endif
4036 
{
    /* Fetch the next packet for an input file, honoring -re rate emulation
     * and using the threaded reader when several inputs are open.
     * NOTE(review): the function signature line is missing from this
     * rendering of the source. */
    if (f->rate_emu) {
        int i;
        /* rate emulation: hold off (EAGAIN) until wall-clock time catches up
         * with the most recent dts of every stream of this file */
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4056 
4057 static int got_eagain(void)
4058 {
4059  int i;
4060  for (i = 0; i < nb_output_streams; i++)
4061  if (output_streams[i]->unavailable)
4062  return 1;
4063  return 0;
4064 }
4065 
4066 static void reset_eagain(void)
4067 {
4068  int i;
4069  for (i = 0; i < nb_input_files; i++)
4070  input_files[i]->eagain = 0;
4071  for (i = 0; i < nb_output_streams; i++)
4072  output_streams[i]->unavailable = 0;
4073 }
4074 
4075 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4076 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4077  AVRational time_base)
4078 {
4079  int ret;
4080 
4081  if (!*duration) {
4082  *duration = tmp;
4083  return tmp_time_base;
4084  }
4085 
4086  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4087  if (ret < 0) {
4088  *duration = tmp;
4089  return tmp_time_base;
4090  }
4091 
4092  return time_base;
4093 }
4094 
{
    /* Seek an input file back to its start (for -stream_loop): flush all
     * decoders, estimate the duration of the just-finished iteration (used
     * to offset timestamps of the next one) and decrement the loop counter.
     * NOTE(review): the function signature line is missing from this
     * rendering of the source. */
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            /* last-frame length from the decoded sample count */
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* no audio: approximate one frame from the best known frame rate */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* a positive loop count is consumed one iteration at a time
     * (negative means loop forever) */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4155 
4156 /*
4157  * Return
4158  * - 0 -- one packet was read and processed
4159  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4160  * this function should be called again
4161  * - AVERROR_EOF -- this function should not be called again
4162  */
4163 static int process_input(int file_index)
4164 {
4165  InputFile *ifile = input_files[file_index];
4166  AVFormatContext *is;
4167  InputStream *ist;
4168  AVPacket pkt;
4169  int ret, i, j;
4170  int64_t duration;
4171  int64_t pkt_dts;
4172 
4173  is = ifile->ctx;
4174  ret = get_input_packet(ifile, &pkt);
4175 
4176  if (ret == AVERROR(EAGAIN)) {
4177  ifile->eagain = 1;
4178  return ret;
4179  }
4180  if (ret < 0 && ifile->loop) {
4181  if ((ret = seek_to_start(ifile, is)) < 0)
4182  return ret;
4183  ret = get_input_packet(ifile, &pkt);
4184  if (ret == AVERROR(EAGAIN)) {
4185  ifile->eagain = 1;
4186  return ret;
4187  }
4188  }
4189  if (ret < 0) {
4190  if (ret != AVERROR_EOF) {
4191  print_error(is->filename, ret);
4192  if (exit_on_error)
4193  exit_program(1);
4194  }
4195 
4196  for (i = 0; i < ifile->nb_streams; i++) {
4197  ist = input_streams[ifile->ist_index + i];
4198  if (ist->decoding_needed) {
4199  ret = process_input_packet(ist, NULL, 0);
4200  if (ret>0)
4201  return 0;
4202  }
4203 
4204  /* mark all outputs that don't go through lavfi as finished */
4205  for (j = 0; j < nb_output_streams; j++) {
4206  OutputStream *ost = output_streams[j];
4207 
4208  if (ost->source_index == ifile->ist_index + i &&
4209  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4210  finish_output_stream(ost);
4211  }
4212  }
4213 
4214  ifile->eof_reached = 1;
4215  return AVERROR(EAGAIN);
4216  }
4217 
4218  reset_eagain();
4219 
4220  if (do_pkt_dump) {
4222  is->streams[pkt.stream_index]);
4223  }
4224  /* the following test is needed in case new streams appear
4225  dynamically in stream : we ignore them */
4226  if (pkt.stream_index >= ifile->nb_streams) {
4227  report_new_stream(file_index, &pkt);
4228  goto discard_packet;
4229  }
4230 
4231  ist = input_streams[ifile->ist_index + pkt.stream_index];
4232 
4233  ist->data_size += pkt.size;
4234  ist->nb_packets++;
4235 
4236  if (ist->discard)
4237  goto discard_packet;
4238 
4239  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4240  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4241  exit_program(1);
4242  }
4243 
4244  if (debug_ts) {
4245  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4246  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4250  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4251  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4252  av_ts2str(input_files[ist->file_index]->ts_offset),
4253  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4254  }
4255 
4256  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4257  int64_t stime, stime2;
4258  // Correcting starttime based on the enabled streams
4259  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4260  // so we instead do it here as part of discontinuity handling
4261  if ( ist->next_dts == AV_NOPTS_VALUE
4262  && ifile->ts_offset == -is->start_time
4263  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4264  int64_t new_start_time = INT64_MAX;
4265  for (i=0; i<is->nb_streams; i++) {
4266  AVStream *st = is->streams[i];
4267  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4268  continue;
4269  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4270  }
4271  if (new_start_time > is->start_time) {
4272  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4273  ifile->ts_offset = -new_start_time;
4274  }
4275  }
4276 
4277  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4278  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4279  ist->wrap_correction_done = 1;
4280 
4281  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4282  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4283  ist->wrap_correction_done = 0;
4284  }
4285  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4286  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4287  ist->wrap_correction_done = 0;
4288  }
4289  }
4290 
4291  /* add the stream-global side data to the first packet */
4292  if (ist->nb_packets == 1) {
4293  for (i = 0; i < ist->st->nb_side_data; i++) {
4294  AVPacketSideData *src_sd = &ist->st->side_data[i];
4295  uint8_t *dst_data;
4296 
4297  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4298  continue;
4299 
4300  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4301  continue;
4302 
4303  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4304  if (!dst_data)
4305  exit_program(1);
4306 
4307  memcpy(dst_data, src_sd->data, src_sd->size);
4308  }
4309  }
4310 
4311  if (pkt.dts != AV_NOPTS_VALUE)
4312  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4313  if (pkt.pts != AV_NOPTS_VALUE)
4314  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4315 
4316  if (pkt.pts != AV_NOPTS_VALUE)
4317  pkt.pts *= ist->ts_scale;
4318  if (pkt.dts != AV_NOPTS_VALUE)
4319  pkt.dts *= ist->ts_scale;
4320 
4322  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4324  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4325  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4326  int64_t delta = pkt_dts - ifile->last_ts;
4327  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4328  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4329  ifile->ts_offset -= delta;
4331  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4332  delta, ifile->ts_offset);
4333  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4334  if (pkt.pts != AV_NOPTS_VALUE)
4335  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4336  }
4337  }
4338 
4339  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4340  if (pkt.pts != AV_NOPTS_VALUE) {
4341  pkt.pts += duration;
4342  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4343  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4344  }
4345 
4346  if (pkt.dts != AV_NOPTS_VALUE)
4347  pkt.dts += duration;
4348 
4350  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4352  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4353  !copy_ts) {
4354  int64_t delta = pkt_dts - ist->next_dts;
4355  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4356  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4357  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4358  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4359  ifile->ts_offset -= delta;
4361  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4362  delta, ifile->ts_offset);
4363  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4364  if (pkt.pts != AV_NOPTS_VALUE)
4365  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4366  }
4367  } else {
4368  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4369  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4370  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4371  pkt.dts = AV_NOPTS_VALUE;
4372  }
4373  if (pkt.pts != AV_NOPTS_VALUE){
4374  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4375  delta = pkt_pts - ist->next_dts;
4376  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4377  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4378  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4379  pkt.pts = AV_NOPTS_VALUE;
4380  }
4381  }
4382  }
4383  }
4384 
4385  if (pkt.dts != AV_NOPTS_VALUE)
4386  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4387 
4388  if (debug_ts) {
4389  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4391  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4392  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4393  av_ts2str(input_files[ist->file_index]->ts_offset),
4394  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4395  }
4396 
4397  sub2video_heartbeat(ist, pkt.pts);
4398 
4399  process_input_packet(ist, &pkt, 0);
4400 
4401 discard_packet:
4402  av_packet_unref(&pkt);
4403 
4404  return 0;
4405 }
4406 
4407 /**
4408  * Perform a step of transcoding for the specified filter graph.
4409  *
4410  * @param[in] graph filter graph to consider
4411  * @param[out] best_ist input stream where a frame would allow to continue
4412  * @return 0 for success, <0 for error
4413  */
4414 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4415 {
4416  int i, ret;
4417  int nb_requests, nb_requests_max = 0;
4418  InputFilter *ifilter;
4419  InputStream *ist;
4420 
4421  *best_ist = NULL;
4422  ret = avfilter_graph_request_oldest(graph->graph);
4423  if (ret >= 0)
4424  return reap_filters(0);
4425 
4426  if (ret == AVERROR_EOF) {
4427  ret = reap_filters(1);
4428  for (i = 0; i < graph->nb_outputs; i++)
4429  close_output_stream(graph->outputs[i]->ost);
4430  return ret;
4431  }
4432  if (ret != AVERROR(EAGAIN))
4433  return ret;
4434 
4435  for (i = 0; i < graph->nb_inputs; i++) {
4436  ifilter = graph->inputs[i];
4437  ist = ifilter->ist;
4438  if (input_files[ist->file_index]->eagain ||
4439  input_files[ist->file_index]->eof_reached)
4440  continue;
4441  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4442  if (nb_requests > nb_requests_max) {
4443  nb_requests_max = nb_requests;
4444  *best_ist = ist;
4445  }
4446  }
4447 
4448  if (!*best_ist)
4449  for (i = 0; i < graph->nb_outputs; i++)
4450  graph->outputs[i]->ost->unavailable = 1;
4451 
4452  return 0;
4453 }
4454 
4455 /**
4456  * Run a single step of transcoding.
4457  *
4458  * @return 0 for success, <0 for error
4459  */
4460 static int transcode_step(void)
4461 {
4462  OutputStream *ost;
4463  InputStream *ist = NULL;
4464  int ret;
4465 
4466  ost = choose_output();
4467  if (!ost) {
4468  if (got_eagain()) {
4469  reset_eagain();
4470  av_usleep(10000);
4471  return 0;
4472  }
4473  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4474  return AVERROR_EOF;
4475  }
4476 
4477  if (ost->filter && !ost->filter->graph->graph) {
4479  ret = configure_filtergraph(ost->filter->graph);
4480  if (ret < 0) {
4481  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4482  return ret;
4483  }
4484  }
4485  }
4486 
4487  if (ost->filter && ost->filter->graph->graph) {
4488  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4489  return ret;
4490  if (!ist)
4491  return 0;
4492  } else if (ost->filter) {
4493  int i;
4494  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4495  InputFilter *ifilter = ost->filter->graph->inputs[i];
4496  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4497  ist = ifilter->ist;
4498  break;
4499  }
4500  }
4501  if (!ist) {
4502  ost->inputs_done = 1;
4503  return 0;
4504  }
4505  } else {
4506  av_assert0(ost->source_index >= 0);
4507  ist = input_streams[ost->source_index];
4508  }
4509 
4510  ret = process_input(ist->file_index);
4511  if (ret == AVERROR(EAGAIN)) {
4512  if (input_files[ist->file_index]->eagain)
4513  ost->unavailable = 1;
4514  return 0;
4515  }
4516 
4517  if (ret < 0)
4518  return ret == AVERROR_EOF ? 0 : ret;
4519 
4520  return reap_filters(0);
4521 }
4522 
4523 /*
4524  * The following code is the main loop of the file converter
4525  */
4526 static int transcode(void)
4527 {
4528  int ret, i;
4529  AVFormatContext *os;
4530  OutputStream *ost;
4531  InputStream *ist;
4532  int64_t timer_start;
4533  int64_t total_packets_written = 0;
4534 
4535  ret = transcode_init();
4536  if (ret < 0)
4537  goto fail;
4538 
4539  if (stdin_interaction) {
4540  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4541  }
4542 
4543  timer_start = av_gettime_relative();
4544 
4545 #if HAVE_PTHREADS
4546  if ((ret = init_input_threads()) < 0)
4547  goto fail;
4548 #endif
4549 
4550  while (!received_sigterm) {
4551  int64_t cur_time= av_gettime_relative();
4552 
4553  /* if 'q' pressed, exits */
4554  if (stdin_interaction)
4555  if (check_keyboard_interaction(cur_time) < 0)
4556  break;
4557 
4558  /* check if there's any stream where output is still needed */
4559  if (!need_output()) {
4560  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4561  break;
4562  }
4563 
4564  ret = transcode_step();
4565  if (ret < 0 && ret != AVERROR_EOF) {
4566  char errbuf[128];
4567  av_strerror(ret, errbuf, sizeof(errbuf));
4568 
4569  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4570  break;
4571  }
4572 
4573  /* dump report by using the output first video and audio streams */
4574  print_report(0, timer_start, cur_time);
4575  }
4576 #if HAVE_PTHREADS
4577  free_input_threads();
4578 #endif
4579 
4580  /* at the end of stream, we must flush the decoder buffers */
4581  for (i = 0; i < nb_input_streams; i++) {
4582  ist = input_streams[i];
4583  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4584  process_input_packet(ist, NULL, 0);
4585  }
4586  }
4587  flush_encoders();
4588 
4589  term_exit();
4590 
4591  /* write the trailer if needed and close file */
4592  for (i = 0; i < nb_output_files; i++) {
4593  os = output_files[i]->ctx;
4594  if (!output_files[i]->header_written) {
4596  "Nothing was written into output file %d (%s), because "
4597  "at least one of its streams received no packets.\n",
4598  i, os->filename);
4599  continue;
4600  }
4601  if ((ret = av_write_trailer(os)) < 0) {
4602  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4603  if (exit_on_error)
4604  exit_program(1);
4605  }
4606  }
4607 
4608  /* dump report by using the first video and audio streams */
4609  print_report(1, timer_start, av_gettime_relative());
4610 
4611  /* close each encoder */
4612  for (i = 0; i < nb_output_streams; i++) {
4613  ost = output_streams[i];
4614  if (ost->encoding_needed) {
4615  av_freep(&ost->enc_ctx->stats_in);
4616  }
4617  total_packets_written += ost->packets_written;
4618  }
4619 
4620  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4621  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4622  exit_program(1);
4623  }
4624 
4625  /* close each decoder */
4626  for (i = 0; i < nb_input_streams; i++) {
4627  ist = input_streams[i];
4628  if (ist->decoding_needed) {
4629  avcodec_close(ist->dec_ctx);
4630  if (ist->hwaccel_uninit)
4631  ist->hwaccel_uninit(ist->dec_ctx);
4632  }
4633  }
4634 
4636 
4637  /* finished ! */
4638  ret = 0;
4639 
4640  fail:
4641 #if HAVE_PTHREADS
4642  free_input_threads();
4643 #endif
4644 
4645  if (output_streams) {
4646  for (i = 0; i < nb_output_streams; i++) {
4647  ost = output_streams[i];
4648  if (ost) {
4649  if (ost->logfile) {
4650  if (fclose(ost->logfile))
4652  "Error closing logfile, loss of information possible: %s\n",
4653  av_err2str(AVERROR(errno)));
4654  ost->logfile = NULL;
4655  }
4656  av_freep(&ost->forced_kf_pts);
4657  av_freep(&ost->apad);
4658  av_freep(&ost->disposition);
4659  av_dict_free(&ost->encoder_opts);
4660  av_dict_free(&ost->sws_dict);
4661  av_dict_free(&ost->