/*
 * Doxygen page-header residue (non-code): "FFmpeg — ffmpeg.c — Go to the
 * documentation of this file."
 */
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
/* Names of the variables made available to -force_key_frames "expr:"
   expressions; the NULL terminator marks the end of the list for the
   expression evaluator. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
static int run_as_daemon  = 0;    /* not referenced in this view — presumably set when running without an interactive tty; confirm */
static int nb_frames_dup = 0;     /* running total of frames duplicated for A/V sync (see do_video_out) */
static unsigned dup_warning = 1000; /* duplication count that triggers the next warning; multiplied by 10 each time it fires */
static int nb_frames_drop = 0;    /* running total of frames dropped for A/V sync (see do_video_out) */
static int64_t decode_error_stat[2]; /* decode success/error counters — NOTE(review): index meaning not visible here; confirm */
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Async-signal-safe part of terminal cleanup: restore the tty attributes
   saved by term_init() if they were changed. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
327 
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll standard input for a single keypress without blocking.
   Returns the character read, or -1 (or read()'s error/EOF result on the
   termios path) when nothing is available. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout = { .tv_sec = 0, .tv_usec = 0 };
    fd_set readable;
    int ret;

    /* zero-timeout select() on stdin: pure poll, never blocks */
    FD_ZERO(&readable);
    FD_SET(0, &readable);
    ret = select(1, &readable, NULL, NULL, &timeout);
    if (ret > 0) {
        ret = read(0, &ch, 1);
        return ret == 1 ? ch : ret;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
499  sizeof(frame), NULL);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
528  }
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
546  }
547  for (i = 0; i < nb_output_streams; i++) {
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
557  av_frame_free(&ost->filtered_frame);
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
562  av_expr_free(ost->forced_keyframes_pexpr);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
566  av_freep(&ost->audio_channels_map);
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570  av_dict_free(&ost->swr_opts);
571 
572  avcodec_free_context(&ost->enc_ctx);
573  avcodec_parameters_free(&ost->ref_par);
574 
575  if (ost->muxing_queue) {
576  while (av_fifo_size(ost->muxing_queue)) {
577  AVPacket pkt;
578  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
580  }
581  av_fifo_freep(&ost->muxing_queue);
582  }
583 
585  }
586 #if HAVE_THREADS
587  free_input_threads();
588 #endif
589  for (i = 0; i < nb_input_files; i++) {
592  }
593  for (i = 0; i < nb_input_streams; i++) {
594  InputStream *ist = input_streams[i];
595 
598  av_dict_free(&ist->decoder_opts);
601  av_freep(&ist->filters);
602  av_freep(&ist->hwaccel_device);
603  av_freep(&ist->dts_buffer);
604 
606 
608  }
609 
610  if (vstats_file) {
611  if (fclose(vstats_file))
613  "Error closing vstats file, loss of information possible: %s\n",
614  av_err2str(AVERROR(errno)));
615  }
617 
622 
623  uninit_opts();
624 
626 
627  if (received_sigterm) {
628  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629  (int) received_sigterm);
630  } else if (ret && atomic_load(&transcode_init_done)) {
631  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
632  }
633  term_exit();
634  ffmpeg_exited = 1;
635 }
636 
638 {
639  AVDictionaryEntry *t = NULL;
640 
641  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
643  }
644 }
645 
647 {
649  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
651  exit_program(1);
652  }
653 }
654 
/* Terminate the program because an experimental codec was selected without
   being explicitly enabled.  NOTE(review): the c/encoder parameters are
   unused here and no diagnostic is printed in this view — presumably the
   caller logs the details before calling; confirm. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
659 
660 static void update_benchmark(const char *fmt, ...)
661 {
662  if (do_benchmark_all) {
664  va_list va;
665  char buf[1024];
666 
667  if (fmt) {
668  va_start(va, fmt);
669  vsnprintf(buf, sizeof(buf), fmt, va);
670  va_end(va);
672  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
676  }
677  current_time = t;
678  }
679 }
680 
682 {
683  int i;
684  for (i = 0; i < nb_output_streams; i++) {
685  OutputStream *ost2 = output_streams[i];
686  ost2->finished |= ost == ost2 ? this_stream : others;
687  }
688 }
689 
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 {
692  AVFormatContext *s = of->ctx;
693  AVStream *st = ost->st;
694  int ret;
695 
696  /*
697  * Audio encoders may split the packets -- #frames in != #packets out.
698  * But there is no reordering, so we can limit the number of output packets
699  * by simply dropping them here.
700  * Counting encoded video frames needs to be done separately because of
701  * reordering, see do_video_out().
702  * Do not count the packet when unqueued because it has been counted when queued.
703  */
704  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
705  if (ost->frame_number >= ost->max_frames) {
707  return;
708  }
709  ost->frame_number++;
710  }
711 
712  if (!of->header_written) {
713  AVPacket tmp_pkt = {0};
714  /* the muxer is not initialized yet, buffer the packet */
715  if (!av_fifo_space(ost->muxing_queue)) {
716  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717  ost->max_muxing_queue_size);
718  if (new_size <= av_fifo_size(ost->muxing_queue)) {
720  "Too many packets buffered for output stream %d:%d.\n",
721  ost->file_index, ost->st->index);
722  exit_program(1);
723  }
724  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
725  if (ret < 0)
726  exit_program(1);
727  }
729  if (ret < 0)
730  exit_program(1);
731  av_packet_move_ref(&tmp_pkt, pkt);
732  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
733  return;
734  }
735 
738  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 
740  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741  int i;
743  NULL);
744  ost->quality = sd ? AV_RL32(sd) : -1;
745  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 
747  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748  if (sd && i < sd[5])
749  ost->error[i] = AV_RL64(sd + 8 + 8*i);
750  else
751  ost->error[i] = -1;
752  }
753 
754  if (ost->frame_rate.num && ost->is_cfr) {
755  if (pkt->duration > 0)
756  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
758  ost->mux_timebase);
759  }
760  }
761 
762  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 
764  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
765  if (pkt->dts != AV_NOPTS_VALUE &&
766  pkt->pts != AV_NOPTS_VALUE &&
767  pkt->dts > pkt->pts) {
768  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769  pkt->dts, pkt->pts,
770  ost->file_index, ost->st->index);
771  pkt->pts =
772  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775  }
777  pkt->dts != AV_NOPTS_VALUE &&
778  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779  ost->last_mux_dts != AV_NOPTS_VALUE) {
780  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781  if (pkt->dts < max) {
782  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783  av_log(s, loglevel, "Non-monotonous DTS in output stream "
784  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786  if (exit_on_error) {
787  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
788  exit_program(1);
789  }
790  av_log(s, loglevel, "changing to %"PRId64". This may result "
791  "in incorrect timestamps in the output file.\n",
792  max);
793  if (pkt->pts >= pkt->dts)
794  pkt->pts = FFMAX(pkt->pts, max);
795  pkt->dts = max;
796  }
797  }
798  }
799  ost->last_mux_dts = pkt->dts;
800 
801  ost->data_size += pkt->size;
802  ost->packets_written++;
803 
805 
806  if (debug_ts) {
807  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809  av_get_media_type_string(ost->enc_ctx->codec_type),
812  pkt->size
813  );
814  }
815 
817  if (ret < 0) {
818  print_error("av_interleaved_write_frame()", ret);
819  main_return_code = 1;
821  }
823 }
824 
826 {
827  OutputFile *of = output_files[ost->file_index];
828 
829  ost->finished |= ENCODER_FINISHED;
830  if (of->shortest) {
831  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
833  }
834 }
835 
836 /*
837  * Send a single packet to the output, applying any bitstream filters
838  * associated with the output stream. This may result in any number
839  * of packets actually being written, depending on what bitstream
840  * filters are applied. The supplied packet is consumed and will be
841  * blank (as if newly-allocated) when this function returns.
842  *
843  * If eof is set, instead indicate EOF to all bitstream filters and
844  * therefore flush any delayed packets to the output. A blank packet
845  * must be supplied in this case.
846  */
848  OutputStream *ost, int eof)
849 {
850  int ret = 0;
851 
852  /* apply the output bitstream filters, if any */
853  if (ost->nb_bitstream_filters) {
854  int idx;
855 
856  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
857  if (ret < 0)
858  goto finish;
859 
860  eof = 0;
861  idx = 1;
862  while (idx) {
863  /* get a packet from the previous filter up the chain */
864  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
865  if (ret == AVERROR(EAGAIN)) {
866  ret = 0;
867  idx--;
868  continue;
869  } else if (ret == AVERROR_EOF) {
870  eof = 1;
871  } else if (ret < 0)
872  goto finish;
873 
874  /* send it to the next filter down the chain or to the muxer */
875  if (idx < ost->nb_bitstream_filters) {
876  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
877  if (ret < 0)
878  goto finish;
879  idx++;
880  eof = 0;
881  } else if (eof)
882  goto finish;
883  else
884  write_packet(of, pkt, ost, 0);
885  }
886  } else if (!eof)
887  write_packet(of, pkt, ost, 0);
888 
889 finish:
890  if (ret < 0 && ret != AVERROR_EOF) {
891  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
893  if(exit_on_error)
894  exit_program(1);
895  }
896 }
897 
899 {
900  OutputFile *of = output_files[ost->file_index];
901 
902  if (of->recording_time != INT64_MAX &&
903  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
904  AV_TIME_BASE_Q) >= 0) {
906  return 0;
907  }
908  return 1;
909 }
910 
912  AVFrame *frame)
913 {
914  AVCodecContext *enc = ost->enc_ctx;
915  AVPacket pkt;
916  int ret;
917 
919  pkt.data = NULL;
920  pkt.size = 0;
921 
923  return;
924 
925  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926  frame->pts = ost->sync_opts;
927  ost->sync_opts = frame->pts + frame->nb_samples;
928  ost->samples_encoded += frame->nb_samples;
929  ost->frames_encoded++;
930 
931  av_assert0(pkt.size || !pkt.data);
933  if (debug_ts) {
934  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937  enc->time_base.num, enc->time_base.den);
938  }
939 
940  ret = avcodec_send_frame(enc, frame);
941  if (ret < 0)
942  goto error;
943 
944  while (1) {
945  ret = avcodec_receive_packet(enc, &pkt);
946  if (ret == AVERROR(EAGAIN))
947  break;
948  if (ret < 0)
949  goto error;
950 
951  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 
953  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
954 
955  if (debug_ts) {
956  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
960  }
961 
962  output_packet(of, &pkt, ost, 0);
963  }
964 
965  return;
966 error:
967  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
968  exit_program(1);
969 }
970 
/* Encode one decoded subtitle for output stream ost and pass the resulting
 * packet(s) to the muxer via output_packet().
 *
 * DVB subtitles are emitted twice per event — one packet to draw the
 * rectangles and one to clear them — hence nb == 2 below; every other codec
 * gets a single packet.  The pts is shifted by the output file's start time
 * so -ss and check_recording_time() (-t) behave as expected.
 */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;  /* size of the shared scratch buffer */
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    /* subtitles without a pts cannot be scheduled on the output timeline */
    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the (file-scope) encode buffer on first use */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* the second (clearing) DVB pass encodes with 0 rects; remember the
           real count so sub can be restored after the encode call */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1053 
1054 static void do_video_out(OutputFile *of,
1055  OutputStream *ost,
1056  AVFrame *next_picture,
1057  double sync_ipts)
1058 {
1059  int ret, format_video_sync;
1060  AVPacket pkt;
1061  AVCodecContext *enc = ost->enc_ctx;
1062  AVCodecParameters *mux_par = ost->st->codecpar;
1063  AVRational frame_rate;
1064  int nb_frames, nb0_frames, i;
1065  double delta, delta0;
1066  double duration = 0;
1067  int frame_size = 0;
1068  InputStream *ist = NULL;
1069  AVFilterContext *filter = ost->filter->filter;
1070 
1071  if (ost->source_index >= 0)
1072  ist = input_streams[ost->source_index];
1073 
1074  frame_rate = av_buffersink_get_frame_rate(filter);
1075  if (frame_rate.num > 0 && frame_rate.den > 0)
1076  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 
1078  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 
1081  if (!ost->filters_script &&
1082  !ost->filters &&
1083  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1084  next_picture &&
1085  ist &&
1086  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088  }
1089 
1090  if (!next_picture) {
1091  //end, flushing
1092  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093  ost->last_nb0_frames[1],
1094  ost->last_nb0_frames[2]);
1095  } else {
1096  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097  delta = delta0 + duration;
1098 
1099  /* by default, we output a single frame */
1100  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101  nb_frames = 1;
1102 
1103  format_video_sync = video_sync_method;
1104  if (format_video_sync == VSYNC_AUTO) {
1105  if(!strcmp(of->ctx->oformat->name, "avi")) {
1106  format_video_sync = VSYNC_VFR;
1107  } else
1108  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1109  if ( ist
1110  && format_video_sync == VSYNC_CFR
1111  && input_files[ist->file_index]->ctx->nb_streams == 1
1112  && input_files[ist->file_index]->input_ts_offset == 0) {
1113  format_video_sync = VSYNC_VSCFR;
1114  }
1115  if (format_video_sync == VSYNC_CFR && copy_ts) {
1116  format_video_sync = VSYNC_VSCFR;
1117  }
1118  }
1119  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1120 
1121  if (delta0 < 0 &&
1122  delta > 0 &&
1123  format_video_sync != VSYNC_PASSTHROUGH &&
1124  format_video_sync != VSYNC_DROP) {
1125  if (delta0 < -0.6) {
1126  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1127  } else
1128  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129  sync_ipts = ost->sync_opts;
1130  duration += delta0;
1131  delta0 = 0;
1132  }
1133 
1134  switch (format_video_sync) {
1135  case VSYNC_VSCFR:
1136  if (ost->frame_number == 0 && delta0 >= 0.5) {
1137  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138  delta = duration;
1139  delta0 = 0;
1140  ost->sync_opts = lrint(sync_ipts);
1141  }
1142  case VSYNC_CFR:
1143  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1145  nb_frames = 0;
1146  } else if (delta < -1.1)
1147  nb_frames = 0;
1148  else if (delta > 1.1) {
1149  nb_frames = lrintf(delta);
1150  if (delta0 > 1.1)
1151  nb0_frames = lrintf(delta0 - 0.6);
1152  }
1153  break;
1154  case VSYNC_VFR:
1155  if (delta <= -0.6)
1156  nb_frames = 0;
1157  else if (delta > 0.6)
1158  ost->sync_opts = lrint(sync_ipts);
1159  break;
1160  case VSYNC_DROP:
1161  case VSYNC_PASSTHROUGH:
1162  ost->sync_opts = lrint(sync_ipts);
1163  break;
1164  default:
1165  av_assert0(0);
1166  }
1167  }
1168 
1169  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170  nb0_frames = FFMIN(nb0_frames, nb_frames);
1171 
1172  memmove(ost->last_nb0_frames + 1,
1173  ost->last_nb0_frames,
1174  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175  ost->last_nb0_frames[0] = nb0_frames;
1176 
1177  if (nb0_frames == 0 && ost->last_dropped) {
1178  nb_frames_drop++;
1180  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181  ost->frame_number, ost->st->index, ost->last_frame->pts);
1182  }
1183  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184  if (nb_frames > dts_error_threshold * 30) {
1185  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1186  nb_frames_drop++;
1187  return;
1188  }
1189  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191  if (nb_frames_dup > dup_warning) {
1192  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1193  dup_warning *= 10;
1194  }
1195  }
1196  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1197 
1198  /* duplicates frame if needed */
1199  for (i = 0; i < nb_frames; i++) {
1200  AVFrame *in_picture;
1201  int forced_keyframe = 0;
1202  double pts_time;
1203  av_init_packet(&pkt);
1204  pkt.data = NULL;
1205  pkt.size = 0;
1206 
1207  if (i < nb0_frames && ost->last_frame) {
1208  in_picture = ost->last_frame;
1209  } else
1210  in_picture = next_picture;
1211 
1212  if (!in_picture)
1213  return;
1214 
1215  in_picture->pts = ost->sync_opts;
1216 
1217  if (!check_recording_time(ost))
1218  return;
1219 
1221  ost->top_field_first >= 0)
1222  in_picture->top_field_first = !!ost->top_field_first;
1223 
1224  if (in_picture->interlaced_frame) {
1225  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227  else
1228  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229  } else
1230  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 
1232  in_picture->quality = enc->global_quality;
1233  in_picture->pict_type = 0;
1234 
1235  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236  in_picture->pts != AV_NOPTS_VALUE)
1237  ost->forced_kf_ref_pts = in_picture->pts;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248  res = av_expr_eval(ost->forced_keyframes_pexpr,
1249  ost->forced_keyframes_expr_const_values, NULL);
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251  ost->forced_keyframes_expr_const_values[FKF_N],
1252  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254  ost->forced_keyframes_expr_const_values[FKF_T],
1255  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1259  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260  ost->forced_keyframes_expr_const_values[FKF_N];
1261  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262  ost->forced_keyframes_expr_const_values[FKF_T];
1263  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1264  }
1265 
1266  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1) {
1270  forced_keyframe = 1;
1271  }
1272 
1273  if (forced_keyframe) {
1274  in_picture->pict_type = AV_PICTURE_TYPE_I;
1275  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276  }
1277 
1279  if (debug_ts) {
1280  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283  enc->time_base.num, enc->time_base.den);
1284  }
1285 
1286  ost->frames_encoded++;
1287 
1288  ret = avcodec_send_frame(enc, in_picture);
1289  if (ret < 0)
1290  goto error;
1291  // Make sure Closed Captions will not be duplicated
1293 
1294  while (1) {
1295  ret = avcodec_receive_packet(enc, &pkt);
1296  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1297  if (ret == AVERROR(EAGAIN))
1298  break;
1299  if (ret < 0)
1300  goto error;
1301 
1302  if (debug_ts) {
1303  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  }
1308 
1310  pkt.pts = ost->sync_opts;
1311 
1312  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 
1314  if (debug_ts) {
1315  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1316  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1317  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1318  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319  }
1320 
1321  frame_size = pkt.size;
1322  output_packet(of, &pkt, ost, 0);
1323 
1324  /* if two pass, output log */
1325  if (ost->logfile && enc->stats_out) {
1326  fprintf(ost->logfile, "%s", enc->stats_out);
1327  }
1328  }
1329  ost->sync_opts++;
1330  /*
1331  * For video, number of frames in == number of packets out.
1332  * But there may be reordering, so we can't throw away frames on encoder
1333  * flush, we need to limit them here, before they go into encoder.
1334  */
1335  ost->frame_number++;
1336 
1337  if (vstats_filename && frame_size)
1339  }
1340 
1341  if (!ost->last_frame)
1342  ost->last_frame = av_frame_alloc();
1343  av_frame_unref(ost->last_frame);
1344  if (next_picture && ost->last_frame)
1345  av_frame_ref(ost->last_frame, next_picture);
1346  else
1347  av_frame_free(&ost->last_frame);
1348 
1349  return;
1350 error:
1351  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1352  exit_program(1);
1353 }
1354 
1355 static double psnr(double d)
1356 {
1357  return -10.0 * log10(d);
1358 }
1359 
     /* Append one line of per-frame statistics for a video stream to the
      * file named by -vstats_file (opened lazily on first use).
      * NOTE(review): the signature line (original 1360) is elided in this
      * extract; upstream this is
      * static void do_video_stats(OutputStream *ost, int frame_size) — confirm. */
1361 {
1362  AVCodecContext *enc;
1363  int frame_number;
1364  double ti1, bitrate, avg_bitrate;
1365 
1366  /* this is executed just the first time do_video_stats is called */
1367  if (!vstats_file) {
1368  vstats_file = fopen(vstats_filename, "w");
1369  if (!vstats_file) {
1370  perror("fopen");
1371  exit_program(1);
1372  }
1373  }
1374 
1375  enc = ost->enc_ctx;
1376  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377  frame_number = ost->st->nb_frames;
     /* vstats_version 2 additionally prints the output file and stream
      * indices on each line. */
1378  if (vstats_version <= 1) {
1379  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380  ost->quality / (float)FF_QP2LAMBDA);
1381  } else {
1382  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383  ost->quality / (float)FF_QP2LAMBDA);
1384  }
1385 
     /* PSNR is derived from the encoder's accumulated squared error,
      * normalized by pixel count and the 8-bit peak value (255^2). */
1386  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 
1389  fprintf(vstats_file,"f_size= %6d ", frame_size);
1390  /* compute pts value */
1391  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
     /* Clamp to avoid dividing by ~0 before any time has elapsed. */
1392  if (ti1 < 0.01)
1393  ti1 = 0.01;
1394 
1395  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1400  }
1401 }
1402 
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 
     /* Mark an output stream as completely finished on both the encoder
      * and the muxer side.
      * NOTE(review): the signature line (original 1405) and the body of
      * the for loop (original 1414) are elided in this extract; the loop
      * presumably propagates the finish to sibling streams when
      * -shortest is in effect — confirm against upstream. */
1406 {
1407  OutputFile *of = output_files[ost->file_index];
1408  int i;
1409 
1410  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1411 
1412  if (of->shortest) {
1413  for (i = 0; i < of->ctx->nb_streams; i++)
1415  }
1416 }
1417 
1418 /**
1419  * Get and encode new output from any of the filtergraphs, without causing
1420  * activity.
1421  *
1422  * @return 0 for success, <0 for severe errors
1423  */
1424 static int reap_filters(int flush)
1425 {
1426  AVFrame *filtered_frame = NULL;
1427  int i;
1428 
1429  /* Reap all buffers present in the buffer sinks */
1430  for (i = 0; i < nb_output_streams; i++) {
1432  OutputFile *of = output_files[ost->file_index];
1434  AVCodecContext *enc = ost->enc_ctx;
1435  int ret = 0;
1436 
1437  if (!ost->filter || !ost->filter->graph->graph)
1438  continue;
1439  filter = ost->filter->filter;
1440 
1441  if (!ost->initialized) {
1442  char error[1024] = "";
1443  ret = init_output_stream(ost, error, sizeof(error));
1444  if (ret < 0) {
1445  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446  ost->file_index, ost->index, error);
1447  exit_program(1);
1448  }
1449  }
1450 
1451  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452  return AVERROR(ENOMEM);
1453  }
1454  filtered_frame = ost->filtered_frame;
1455 
1456  while (1) {
1457  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1458  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1460  if (ret < 0) {
1461  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1463  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464  } else if (flush && ret == AVERROR_EOF) {
1467  }
1468  break;
1469  }
1470  if (ost->finished) {
1471  av_frame_unref(filtered_frame);
1472  continue;
1473  }
1474  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1477  AVRational tb = enc->time_base;
1478  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 
1480  tb.den <<= extra_bits;
1481  float_pts =
1482  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1484  float_pts /= 1 << extra_bits;
1485  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 
1488  filtered_frame->pts =
1489  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1491  }
1492 
1493  switch (av_buffersink_get_type(filter)) {
1494  case AVMEDIA_TYPE_VIDEO:
1495  if (!ost->frame_aspect_ratio.num)
1496  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497 
1498  if (debug_ts) {
1499  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501  float_pts,
1502  enc->time_base.num, enc->time_base.den);
1503  }
1504 
1505  do_video_out(of, ost, filtered_frame, float_pts);
1506  break;
1507  case AVMEDIA_TYPE_AUDIO:
1508  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509  enc->channels != filtered_frame->channels) {
1511  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512  break;
1513  }
1514  do_audio_out(of, ost, filtered_frame);
1515  break;
1516  default:
1517  // TODO support subtitle filters
1518  av_assert0(0);
1519  }
1520 
1521  av_frame_unref(filtered_frame);
1522  }
1523  }
1524 
1525  return 0;
1526 }
1527 
1528 static void print_final_stats(int64_t total_size)
1529 {
1530  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531  uint64_t subtitle_size = 0;
1532  uint64_t data_size = 0;
1533  float percent = -1.0;
1534  int i, j;
1535  int pass1_used = 1;
1536 
1537  for (i = 0; i < nb_output_streams; i++) {
1539  switch (ost->enc_ctx->codec_type) {
1540  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543  default: other_size += ost->data_size; break;
1544  }
1545  extra_size += ost->enc_ctx->extradata_size;
1546  data_size += ost->data_size;
1547  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549  pass1_used = 0;
1550  }
1551 
1552  if (data_size && total_size>0 && total_size >= data_size)
1553  percent = 100.0 * (total_size - data_size) / data_size;
1554 
1555  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556  video_size / 1024.0,
1557  audio_size / 1024.0,
1558  subtitle_size / 1024.0,
1559  other_size / 1024.0,
1560  extra_size / 1024.0);
1561  if (percent >= 0.0)
1562  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563  else
1564  av_log(NULL, AV_LOG_INFO, "unknown");
1565  av_log(NULL, AV_LOG_INFO, "\n");
1566 
1567  /* print verbose per-stream stats */
1568  for (i = 0; i < nb_input_files; i++) {
1569  InputFile *f = input_files[i];
1570  uint64_t total_packets = 0, total_size = 0;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573  i, f->ctx->url);
1574 
1575  for (j = 0; j < f->nb_streams; j++) {
1576  InputStream *ist = input_streams[f->ist_index + j];
1577  enum AVMediaType type = ist->dec_ctx->codec_type;
1578 
1579  total_size += ist->data_size;
1580  total_packets += ist->nb_packets;
1581 
1582  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583  i, j, media_type_string(type));
1584  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585  ist->nb_packets, ist->data_size);
1586 
1587  if (ist->decoding_needed) {
1588  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589  ist->frames_decoded);
1590  if (type == AVMEDIA_TYPE_AUDIO)
1591  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592  av_log(NULL, AV_LOG_VERBOSE, "; ");
1593  }
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "\n");
1596  }
1597 
1598  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599  total_packets, total_size);
1600  }
1601 
1602  for (i = 0; i < nb_output_files; i++) {
1603  OutputFile *of = output_files[i];
1604  uint64_t total_packets = 0, total_size = 0;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607  i, of->ctx->url);
1608 
1609  for (j = 0; j < of->ctx->nb_streams; j++) {
1611  enum AVMediaType type = ost->enc_ctx->codec_type;
1612 
1613  total_size += ost->data_size;
1614  total_packets += ost->packets_written;
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617  i, j, media_type_string(type));
1618  if (ost->encoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620  ost->frames_encoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627  ost->packets_written, ost->data_size);
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "\n");
1630  }
1631 
1632  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633  total_packets, total_size);
1634  }
1635  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637  if (pass1_used) {
1638  av_log(NULL, AV_LOG_WARNING, "\n");
1639  } else {
1640  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1641  }
1642  }
1643 }
1644 
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 {
1647  AVBPrint buf, buf_script;
1648  OutputStream *ost;
1649  AVFormatContext *oc;
1650  int64_t total_size;
1651  AVCodecContext *enc;
1652  int frame_number, vid, i;
1653  double bitrate;
1654  double speed;
1655  int64_t pts = INT64_MIN + 1;
1656  static int64_t last_time = -1;
1657  static int qp_histogram[52];
1658  int hours, mins, secs, us;
1659  const char *hours_sign;
1660  int ret;
1661  float t;
1662 
1663  if (!print_stats && !is_last_report && !progress_avio)
1664  return;
1665 
1666  if (!is_last_report) {
1667  if (last_time == -1) {
1668  last_time = cur_time;
1669  return;
1670  }
1671  if ((cur_time - last_time) < 500000)
1672  return;
1673  last_time = cur_time;
1674  }
1675 
1676  t = (cur_time-timer_start) / 1000000.0;
1677 
1678 
1679  oc = output_files[0]->ctx;
1680 
1681  total_size = avio_size(oc->pb);
1682  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683  total_size = avio_tell(oc->pb);
1684 
1685  vid = 0;
1687  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1688  for (i = 0; i < nb_output_streams; i++) {
1689  float q = -1;
1690  ost = output_streams[i];
1691  enc = ost->enc_ctx;
1692  if (!ost->stream_copy)
1693  q = ost->quality / (float) FF_QP2LAMBDA;
1694 
1695  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696  av_bprintf(&buf, "q=%2.1f ", q);
1697  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698  ost->file_index, ost->index, q);
1699  }
1700  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701  float fps;
1702 
1703  frame_number = ost->frame_number;
1704  fps = t > 1 ? frame_number / t : 0;
1705  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1706  frame_number, fps < 9.95, fps, q);
1707  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1709  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710  ost->file_index, ost->index, q);
1711  if (is_last_report)
1712  av_bprintf(&buf, "L");
1713  if (qp_hist) {
1714  int j;
1715  int qp = lrintf(q);
1716  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717  qp_histogram[qp]++;
1718  for (j = 0; j < 32; j++)
1719  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1720  }
1721 
1722  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723  int j;
1724  double error, error_sum = 0;
1725  double scale, scale_sum = 0;
1726  double p;
1727  char type[3] = { 'Y','U','V' };
1728  av_bprintf(&buf, "PSNR=");
1729  for (j = 0; j < 3; j++) {
1730  if (is_last_report) {
1731  error = enc->error[j];
1732  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733  } else {
1734  error = ost->error[j];
1735  scale = enc->width * enc->height * 255.0 * 255.0;
1736  }
1737  if (j)
1738  scale /= 4;
1739  error_sum += error;
1740  scale_sum += scale;
1741  p = psnr(error / scale);
1742  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1743  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744  ost->file_index, ost->index, type[j] | 32, p);
1745  }
1746  p = psnr(error_sum / scale_sum);
1747  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1748  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749  ost->file_index, ost->index, p);
1750  }
1751  vid = 1;
1752  }
1753  /* compute min output value */
1756  ost->st->time_base, AV_TIME_BASE_Q));
1757  if (is_last_report)
1758  nb_frames_drop += ost->last_dropped;
1759  }
1760 
1761  secs = FFABS(pts) / AV_TIME_BASE;
1762  us = FFABS(pts) % AV_TIME_BASE;
1763  mins = secs / 60;
1764  secs %= 60;
1765  hours = mins / 60;
1766  mins %= 60;
1767  hours_sign = (pts < 0) ? "-" : "";
1768 
1769  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 
1772  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1773  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1774  if (pts == AV_NOPTS_VALUE) {
1775  av_bprintf(&buf, "N/A ");
1776  } else {
1777  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1778  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1779  }
1780 
1781  if (bitrate < 0) {
1782  av_bprintf(&buf, "bitrate=N/A");
1783  av_bprintf(&buf_script, "bitrate=N/A\n");
1784  }else{
1785  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1786  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1787  }
1788 
1789  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791  if (pts == AV_NOPTS_VALUE) {
1792  av_bprintf(&buf_script, "out_time_us=N/A\n");
1793  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1794  av_bprintf(&buf_script, "out_time=N/A\n");
1795  } else {
1796  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1797  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1798  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1799  hours_sign, hours, mins, secs, us);
1800  }
1801 
1803  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1804  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1805  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1806 
1807  if (speed < 0) {
1808  av_bprintf(&buf, " speed=N/A");
1809  av_bprintf(&buf_script, "speed=N/A\n");
1810  } else {
1811  av_bprintf(&buf, " speed=%4.3gx", speed);
1812  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1813  }
1814 
1815  if (print_stats || is_last_report) {
1816  const char end = is_last_report ? '\n' : '\r';
1817  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1818  fprintf(stderr, "%s %c", buf.str, end);
1819  } else
1820  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1821 
1822  fflush(stderr);
1823  }
1825 
1826  if (progress_avio) {
1827  av_bprintf(&buf_script, "progress=%s\n",
1828  is_last_report ? "end" : "continue");
1829  avio_write(progress_avio, buf_script.str,
1830  FFMIN(buf_script.len, buf_script.size - 1));
1832  av_bprint_finalize(&buf_script, NULL);
1833  if (is_last_report) {
1834  if ((ret = avio_closep(&progress_avio)) < 0)
1836  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1837  }
1838  }
1839 
1840  if (is_last_report)
1841  print_final_stats(total_size);
1842 }
1843 
     /* Seed an input filter's media parameters (format, audio layout,
      * video dimensions, SAR) from the demuxer-provided codec parameters,
      * used when no decoded frame ever arrived.
      * NOTE(review): the signature line (original 1844) is elided in this
      * extract; upstream this takes
      * (InputFilter *ifilter, AVCodecParameters *par) — confirm. */
1845 {
1846  // We never got any input. Set a fake format, which will
1847  // come from libavformat.
1848  ifilter->format = par->format;
1849  ifilter->sample_rate = par->sample_rate;
1850  ifilter->channels = par->channels;
1851  ifilter->channel_layout = par->channel_layout;
1852  ifilter->width = par->width;
1853  ifilter->height = par->height;
1854  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1855 }
1856 
1857 static void flush_encoders(void)
1858 {
1859  int i, ret;
1860 
1861  for (i = 0; i < nb_output_streams; i++) {
1863  AVCodecContext *enc = ost->enc_ctx;
1864  OutputFile *of = output_files[ost->file_index];
1865 
1866  if (!ost->encoding_needed)
1867  continue;
1868 
1869  // Try to enable encoding with no input frames.
1870  // Maybe we should just let encoding fail instead.
1871  if (!ost->initialized) {
1872  FilterGraph *fg = ost->filter->graph;
1873  char error[1024] = "";
1874 
1876  "Finishing stream %d:%d without any data written to it.\n",
1877  ost->file_index, ost->st->index);
1878 
1879  if (ost->filter && !fg->graph) {
1880  int x;
1881  for (x = 0; x < fg->nb_inputs; x++) {
1882  InputFilter *ifilter = fg->inputs[x];
1883  if (ifilter->format < 0)
1884  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1885  }
1886 
1888  continue;
1889 
1890  ret = configure_filtergraph(fg);
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1893  exit_program(1);
1894  }
1895 
1897  }
1898 
1899  ret = init_output_stream(ost, error, sizeof(error));
1900  if (ret < 0) {
1901  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1902  ost->file_index, ost->index, error);
1903  exit_program(1);
1904  }
1905  }
1906 
1907  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1908  continue;
1909 
1911  continue;
1912 
1913  for (;;) {
1914  const char *desc = NULL;
1915  AVPacket pkt;
1916  int pkt_size;
1917 
1918  switch (enc->codec_type) {
1919  case AVMEDIA_TYPE_AUDIO:
1920  desc = "audio";
1921  break;
1922  case AVMEDIA_TYPE_VIDEO:
1923  desc = "video";
1924  break;
1925  default:
1926  av_assert0(0);
1927  }
1928 
1929  av_init_packet(&pkt);
1930  pkt.data = NULL;
1931  pkt.size = 0;
1932 
1934 
1935  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1936  ret = avcodec_send_frame(enc, NULL);
1937  if (ret < 0) {
1938  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1939  desc,
1940  av_err2str(ret));
1941  exit_program(1);
1942  }
1943  }
1944 
1945  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1946  if (ret < 0 && ret != AVERROR_EOF) {
1947  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1948  desc,
1949  av_err2str(ret));
1950  exit_program(1);
1951  }
1952  if (ost->logfile && enc->stats_out) {
1953  fprintf(ost->logfile, "%s", enc->stats_out);
1954  }
1955  if (ret == AVERROR_EOF) {
1956  output_packet(of, &pkt, ost, 1);
1957  break;
1958  }
1959  if (ost->finished & MUXER_FINISHED) {
1960  av_packet_unref(&pkt);
1961  continue;
1962  }
1963  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1964  pkt_size = pkt.size;
1965  output_packet(of, &pkt, ost, 0);
1966  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1967  do_video_stats(ost, pkt_size);
1968  }
1969  }
1970  }
1971 }
1972 
1973 /*
1974  * Check whether a packet from ist should be written into ost at this time
1975  */
     /* Returns 1 when ost is fed from exactly this input stream, is not
      * finished, and the input pts is not before the output file's start
      * time; 0 otherwise.
      * NOTE(review): the signature line (original 1976) is elided in this
      * extract; upstream this is
      * static int check_output_constraints(InputStream *ist,
      * OutputStream *ost) — confirm. */
1977 {
1978  OutputFile *of = output_files[ost->file_index];
     /* Global input-stream index: per-file base index plus the stream's
      * index within its file. */
1979  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1980 
1981  if (ost->source_index != ist_index)
1982  return 0;
1983 
1984  if (ost->finished)
1985  return 0;
1986 
1987  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1988  return 0;
1989 
1990  return 1;
1991 }
1992 
1994 {
1995  OutputFile *of = output_files[ost->file_index];
1996  InputFile *f = input_files [ist->file_index];
1997  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1998  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1999  AVPacket opkt = { 0 };
2000 
2001  av_init_packet(&opkt);
2002 
2003  // EOF: flush output bitstream filters.
2004  if (!pkt) {
2005  output_packet(of, &opkt, ost, 1);
2006  return;
2007  }
2008 
2009  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2010  !ost->copy_initial_nonkeyframes)
2011  return;
2012 
2013  if (!ost->frame_number && !ost->copy_prior_start) {
2014  int64_t comp_start = start_time;
2015  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2016  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2017  if (pkt->pts == AV_NOPTS_VALUE ?
2018  ist->pts < comp_start :
2019  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2020  return;
2021  }
2022 
2023  if (of->recording_time != INT64_MAX &&
2024  ist->pts >= of->recording_time + start_time) {
2026  return;
2027  }
2028 
2029  if (f->recording_time != INT64_MAX) {
2030  start_time = f->ctx->start_time;
2031  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2032  start_time += f->start_time;
2033  if (ist->pts >= f->recording_time + start_time) {
2035  return;
2036  }
2037  }
2038 
2039  /* force the input stream PTS */
2040  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2041  ost->sync_opts++;
2042 
2043  if (pkt->pts != AV_NOPTS_VALUE)
2044  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2045  else
2046  opkt.pts = AV_NOPTS_VALUE;
2047 
2048  if (pkt->dts == AV_NOPTS_VALUE)
2049  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2050  else
2051  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2052  opkt.dts -= ost_tb_start_time;
2053 
2056  if(!duration)
2057  duration = ist->dec_ctx->frame_size;
2058  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2059  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2060  ost->mux_timebase) - ost_tb_start_time;
2061  }
2062 
2063  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2064 
2065  opkt.flags = pkt->flags;
2066 
2067  if (pkt->buf) {
2068  opkt.buf = av_buffer_ref(pkt->buf);
2069  if (!opkt.buf)
2070  exit_program(1);
2071  }
2072  opkt.data = pkt->data;
2073  opkt.size = pkt->size;
2074 
2075  av_copy_packet_side_data(&opkt, pkt);
2076 
2077  output_packet(of, &opkt, ost, 0);
2078 }
2079 
     /* Provide a channel layout for an audio input stream whose decoder
      * did not report one.  Returns 1 on success (or when a layout was
      * already present), 0 when no guess could be made.
      * NOTE(review): the signature line (original 2080) and line 2089
      * (presumably the av_get_default_channel_layout() assignment) are
      * elided in this extract — confirm against upstream. */
2081 {
2082  AVCodecContext *dec = ist->dec_ctx;
2083 
2084  if (!dec->channel_layout) {
2085  char layout_name[256];
2086 
     /* Refuse to guess for channel counts above -guess_layout_max. */
2087  if (dec->channels > ist->guess_layout_max)
2088  return 0;
2090  if (!dec->channel_layout)
2091  return 0;
     /* Warn the user so an incorrect guess can be overridden. */
2092  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2093  dec->channels, dec->channel_layout);
2094  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2095  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2096  }
2097  return 1;
2098 }
2099 
     /* Record the outcome of one decode attempt and honor -xerror:
      * decode_error_stat[0] counts successes, decode_error_stat[1]
      * counts failures (ret < 0). */
2100 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2101 {
2102  if (*got_output || ret<0)
2103  decode_error_stat[ret<0] ++;
2104 
     /* With -xerror any decode error terminates the whole program. */
2105  if (ret < 0 && exit_on_error)
2106  exit_program(1);
2107 
     /* NOTE(review): original lines 2109-2110 (the corrupt-frame test and
      * the opening of the av_log() call) are elided in this extract —
      * confirm against upstream. */
2108  if (*got_output && ist) {
2111  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2112  if (exit_on_error)
2113  exit_program(1);
2114  }
2115  }
2116 }
2117 
2118 // Filters can be configured only if the formats of all inputs are known.
     // Returns 1 when every audio/video input of the graph has a known
     // format (format >= 0), 0 otherwise.
     // NOTE(review): the signature line (original 2119) is elided in this
     // extract; upstream this is
     // static int ifilter_has_all_input_formats(FilterGraph *fg) — confirm.
2120 {
2121  int i;
2122  for (i = 0; i < fg->nb_inputs; i++) {
2123  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2124  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2125  return 0;
2126  }
2127  return 1;
2128 }
2129 
2131 {
2132  FilterGraph *fg = ifilter->graph;
2133  int need_reinit, ret, i;
2134 
2135  /* determine if the parameters for this input changed */
2136  need_reinit = ifilter->format != frame->format;
2137 
2138  switch (ifilter->ist->st->codecpar->codec_type) {
2139  case AVMEDIA_TYPE_AUDIO:
2140  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2141  ifilter->channels != frame->channels ||
2142  ifilter->channel_layout != frame->channel_layout;
2143  break;
2144  case AVMEDIA_TYPE_VIDEO:
2145  need_reinit |= ifilter->width != frame->width ||
2146  ifilter->height != frame->height;
2147  break;
2148  }
2149 
2150  if (!ifilter->ist->reinit_filters && fg->graph)
2151  need_reinit = 0;
2152 
2153  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2154  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2155  need_reinit = 1;
2156 
2157  if (need_reinit) {
2159  if (ret < 0)
2160  return ret;
2161  }
2162 
2163  /* (re)init the graph if possible, otherwise buffer the frame and return */
2164  if (need_reinit || !fg->graph) {
2165  for (i = 0; i < fg->nb_inputs; i++) {
2166  if (!ifilter_has_all_input_formats(fg)) {
2168  if (!tmp)
2169  return AVERROR(ENOMEM);
2171 
2172  if (!av_fifo_space(ifilter->frame_queue)) {
2173  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2174  if (ret < 0) {
2175  av_frame_free(&tmp);
2176  return ret;
2177  }
2178  }
2179  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2180  return 0;
2181  }
2182  }
2183 
2184  ret = reap_filters(1);
2185  if (ret < 0 && ret != AVERROR_EOF) {
2186  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2187  return ret;
2188  }
2189 
2190  ret = configure_filtergraph(fg);
2191  if (ret < 0) {
2192  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2193  return ret;
2194  }
2195  }
2196 
2198  if (ret < 0) {
2199  if (ret != AVERROR_EOF)
2200  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2201  return ret;
2202  }
2203 
2204  return 0;
2205 }
2206 
     /* Signal end-of-stream on an input filter.  Returns 0 on success or
      * a negative AVERROR code. */
2056 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2208 {
2209  int ret;
2210 
2211  ifilter->eof = 1;
2212 
     /* Graph already configured: push EOF into the buffer source.
      * NOTE(review): original line 2214 (the call producing ret,
      * presumably av_buffersrc_close(ifilter->filter, pts, ...)) is
      * elided in this extract — confirm against upstream. */
2213  if (ifilter->filter) {
2215  if (ret < 0)
2216  return ret;
2217  } else {
2218  // the filtergraph was never configured
     /* Fall back to the stream's codec parameters so the graph can still
      * be configured; audio/video inputs with no known format at EOF are
      * unrecoverable. */
2219  if (ifilter->format < 0)
2220  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2221  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2222  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2223  return AVERROR_INVALIDDATA;
2224  }
2225  }
2226 
2227  return 0;
2228 }
2229 
2230 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2231 // There is the following difference: if you got a frame, you must call
2232 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2233 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2234 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2235 {
2236  int ret;
2237 
2238  *got_frame = 0;
2239 
2240  if (pkt) {
2241  ret = avcodec_send_packet(avctx, pkt);
2242  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2243  // decoded frames with avcodec_receive_frame() until done.
2244  if (ret < 0 && ret != AVERROR_EOF)
2245  return ret;
2246  }
2247 
2248  ret = avcodec_receive_frame(avctx, frame);
2249  if (ret < 0 && ret != AVERROR(EAGAIN))
2250  return ret;
2251  if (ret >= 0)
2252  *got_frame = 1;
2253 
2254  return 0;
2255 }
2256 
2257 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2258 {
2259  int i, ret;
2260  AVFrame *f;
2261 
2262  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2263  for (i = 0; i < ist->nb_filters; i++) {
2264  if (i < ist->nb_filters - 1) {
2265  f = ist->filter_frame;
2266  ret = av_frame_ref(f, decoded_frame);
2267  if (ret < 0)
2268  break;
2269  } else
2270  f = decoded_frame;
2271  ret = ifilter_send_frame(ist->filters[i], f);
2272  if (ret == AVERROR_EOF)
2273  ret = 0; /* ignore */
2274  if (ret < 0) {
2276  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2277  break;
2278  }
2279  }
2280  return ret;
2281 }
2282 
2283 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2284  int *decode_failed)
2285 {
2286  AVFrame *decoded_frame;
2287  AVCodecContext *avctx = ist->dec_ctx;
2288  int ret, err = 0;
2289  AVRational decoded_frame_tb;
2290 
2291  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2292  return AVERROR(ENOMEM);
2293  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2294  return AVERROR(ENOMEM);
2295  decoded_frame = ist->decoded_frame;
2296 
2298  ret = decode(avctx, decoded_frame, got_output, pkt);
2299  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2300  if (ret < 0)
2301  *decode_failed = 1;
2302 
2303  if (ret >= 0 && avctx->sample_rate <= 0) {
2304  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2306  }
2307 
2308  if (ret != AVERROR_EOF)
2309  check_decode_result(ist, got_output, ret);
2310 
2311  if (!*got_output || ret < 0)
2312  return ret;
2313 
2314  ist->samples_decoded += decoded_frame->nb_samples;
2315  ist->frames_decoded++;
2316 
2317  /* increment next_dts to use for the case where the input stream does not
2318  have timestamps or there are multiple frames in the packet */
2319  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320  avctx->sample_rate;
2321  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2322  avctx->sample_rate;
2323 
2324  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2325  decoded_frame_tb = ist->st->time_base;
2326  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2327  decoded_frame->pts = pkt->pts;
2328  decoded_frame_tb = ist->st->time_base;
2329  }else {
2330  decoded_frame->pts = ist->dts;
2331  decoded_frame_tb = AV_TIME_BASE_Q;
2332  }
2333  if (decoded_frame->pts != AV_NOPTS_VALUE)
2334  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2335  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2336  (AVRational){1, avctx->sample_rate});
2337  ist->nb_samples = decoded_frame->nb_samples;
2338  err = send_frame_to_filters(ist, decoded_frame);
2339 
2341  av_frame_unref(decoded_frame);
2342  return err < 0 ? err : ret;
2343 }
2344 
2345 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2346  int *decode_failed)
2347 {
2348  AVFrame *decoded_frame;
2349  int i, ret = 0, err = 0;
2350  int64_t best_effort_timestamp;
2351  int64_t dts = AV_NOPTS_VALUE;
2352  AVPacket avpkt;
2353 
2354  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2355  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2356  // skip the packet.
2357  if (!eof && pkt && pkt->size == 0)
2358  return 0;
2359 
2360  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2361  return AVERROR(ENOMEM);
2362  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2363  return AVERROR(ENOMEM);
2364  decoded_frame = ist->decoded_frame;
2365  if (ist->dts != AV_NOPTS_VALUE)
2366  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2367  if (pkt) {
2368  avpkt = *pkt;
2369  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2370  }
2371 
2372  // The old code used to set dts on the drain packet, which does not work
2373  // with the new API anymore.
2374  if (eof) {
2375  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2376  if (!new)
2377  return AVERROR(ENOMEM);
2378  ist->dts_buffer = new;
2379  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2380  }
2381 
2383  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2384  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2385  if (ret < 0)
2386  *decode_failed = 1;
2387 
2388  // The following line may be required in some cases where there is no parser
2389  // or the parser does not has_b_frames correctly
2390  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2391  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2392  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2393  } else
2395  "video_delay is larger in decoder than demuxer %d > %d.\n"
2396  "If you want to help, upload a sample "
2397  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2398  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2399  ist->dec_ctx->has_b_frames,
2400  ist->st->codecpar->video_delay);
2401  }
2402 
2403  if (ret != AVERROR_EOF)
2404  check_decode_result(ist, got_output, ret);
2405 
2406  if (*got_output && ret >= 0) {
2407  if (ist->dec_ctx->width != decoded_frame->width ||
2408  ist->dec_ctx->height != decoded_frame->height ||
2409  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2410  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2411  decoded_frame->width,
2412  decoded_frame->height,
2413  decoded_frame->format,
2414  ist->dec_ctx->width,
2415  ist->dec_ctx->height,
2416  ist->dec_ctx->pix_fmt);
2417  }
2418  }
2419 
2420  if (!*got_output || ret < 0)
2421  return ret;
2422 
2423  if(ist->top_field_first>=0)
2424  decoded_frame->top_field_first = ist->top_field_first;
2425 
2426  ist->frames_decoded++;
2427 
2428  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2429  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2430  if (err < 0)
2431  goto fail;
2432  }
2433  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2434 
2435  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2436  *duration_pts = decoded_frame->pkt_duration;
2437 
2438  if (ist->framerate.num)
2439  best_effort_timestamp = ist->cfr_next_pts++;
2440 
2441  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2442  best_effort_timestamp = ist->dts_buffer[0];
2443 
2444  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2445  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2446  ist->nb_dts_buffer--;
2447  }
2448 
2449  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2450  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2451 
2452  if (ts != AV_NOPTS_VALUE)
2453  ist->next_pts = ist->pts = ts;
2454  }
2455 
2456  if (debug_ts) {
2457  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2458  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2459  ist->st->index, av_ts2str(decoded_frame->pts),
2460  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2461  best_effort_timestamp,
2462  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2463  decoded_frame->key_frame, decoded_frame->pict_type,
2464  ist->st->time_base.num, ist->st->time_base.den);
2465  }
2466 
2467  if (ist->st->sample_aspect_ratio.num)
2468  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2469 
2470  err = send_frame_to_filters(ist, decoded_frame);
2471 
2472 fail:
2474  av_frame_unref(decoded_frame);
2475  return err < 0 ? err : ret;
2476 }
2477 
2478 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2479  int *decode_failed)
2480 {
2481  AVSubtitle subtitle;
2482  int free_sub = 1;
2483  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2484  &subtitle, got_output, pkt);
2485 
2486  check_decode_result(NULL, got_output, ret);
2487 
2488  if (ret < 0 || !*got_output) {
2489  *decode_failed = 1;
2490  if (!pkt->size)
2491  sub2video_flush(ist);
2492  return ret;
2493  }
2494 
2495  if (ist->fix_sub_duration) {
2496  int end = 1;
2497  if (ist->prev_sub.got_output) {
2498  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2499  1000, AV_TIME_BASE);
2500  if (end < ist->prev_sub.subtitle.end_display_time) {
2501  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2502  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2504  end <= 0 ? ", dropping it" : "");
2506  }
2507  }
2508  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2509  FFSWAP(int, ret, ist->prev_sub.ret);
2510  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2511  if (end <= 0)
2512  goto out;
2513  }
2514 
2515  if (!*got_output)
2516  return ret;
2517 
2518  if (ist->sub2video.frame) {
2519  sub2video_update(ist, &subtitle);
2520  } else if (ist->nb_filters) {
2521  if (!ist->sub2video.sub_queue)
2522  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2523  if (!ist->sub2video.sub_queue)
2524  exit_program(1);
2525  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2527  if (ret < 0)
2528  exit_program(1);
2529  }
2530  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2531  free_sub = 0;
2532  }
2533 
2534  if (!subtitle.num_rects)
2535  goto out;
2536 
2537  ist->frames_decoded++;
2538 
2539  for (i = 0; i < nb_output_streams; i++) {
2541 
2542  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2543  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2544  continue;
2545 
2546  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2547  }
2548 
2549 out:
2550  if (free_sub)
2551  avsubtitle_free(&subtitle);
2552  return ret;
2553 }
2554 
2556 {
2557  int i, ret;
2558  /* TODO keep pts also in stream time base to avoid converting back */
2559  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2561 
2562  for (i = 0; i < ist->nb_filters; i++) {
2563  ret = ifilter_send_eof(ist->filters[i], pts);
2564  if (ret < 0)
2565  return ret;
2566  }
2567  return 0;
2568 }
2569 
2570 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2571 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2572 {
2573  int ret = 0, i;
2574  int repeating = 0;
2575  int eof_reached = 0;
2576 
2577  AVPacket avpkt;
2578  if (!ist->saw_first_ts) {
2579  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2580  ist->pts = 0;
2581  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2582  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2583  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2584  }
2585  ist->saw_first_ts = 1;
2586  }
2587 
2588  if (ist->next_dts == AV_NOPTS_VALUE)
2589  ist->next_dts = ist->dts;
2590  if (ist->next_pts == AV_NOPTS_VALUE)
2591  ist->next_pts = ist->pts;
2592 
2593  if (!pkt) {
2594  /* EOF handling */
2595  av_init_packet(&avpkt);
2596  avpkt.data = NULL;
2597  avpkt.size = 0;
2598  } else {
2599  avpkt = *pkt;
2600  }
2601 
2602  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2603  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2604  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2605  ist->next_pts = ist->pts = ist->dts;
2606  }
2607 
2608  // while we have more to decode or while the decoder did output something on EOF
2609  while (ist->decoding_needed) {
2610  int64_t duration_dts = 0;
2611  int64_t duration_pts = 0;
2612  int got_output = 0;
2613  int decode_failed = 0;
2614 
2615  ist->pts = ist->next_pts;
2616  ist->dts = ist->next_dts;
2617 
2618  switch (ist->dec_ctx->codec_type) {
2619  case AVMEDIA_TYPE_AUDIO:
2620  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2621  &decode_failed);
2622  break;
2623  case AVMEDIA_TYPE_VIDEO:
2624  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2625  &decode_failed);
2626  if (!repeating || !pkt || got_output) {
2627  if (pkt && pkt->duration) {
2628  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2629  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2631  duration_dts = ((int64_t)AV_TIME_BASE *
2632  ist->dec_ctx->framerate.den * ticks) /
2634  }
2635 
2636  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2637  ist->next_dts += duration_dts;
2638  }else
2639  ist->next_dts = AV_NOPTS_VALUE;
2640  }
2641 
2642  if (got_output) {
2643  if (duration_pts > 0) {
2644  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2645  } else {
2646  ist->next_pts += duration_dts;
2647  }
2648  }
2649  break;
2650  case AVMEDIA_TYPE_SUBTITLE:
2651  if (repeating)
2652  break;
2653  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2654  if (!pkt && ret >= 0)
2655  ret = AVERROR_EOF;
2656  break;
2657  default:
2658  return -1;
2659  }
2660 
2661  if (ret == AVERROR_EOF) {
2662  eof_reached = 1;
2663  break;
2664  }
2665 
2666  if (ret < 0) {
2667  if (decode_failed) {
2668  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2669  ist->file_index, ist->st->index, av_err2str(ret));
2670  } else {
2671  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2672  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2673  }
2674  if (!decode_failed || exit_on_error)
2675  exit_program(1);
2676  break;
2677  }
2678 
2679  if (got_output)
2680  ist->got_output = 1;
2681 
2682  if (!got_output)
2683  break;
2684 
2685  // During draining, we might get multiple output frames in this loop.
2686  // ffmpeg.c does not drain the filter chain on configuration changes,
2687  // which means if we send multiple frames at once to the filters, and
2688  // one of those frames changes configuration, the buffered frames will
2689  // be lost. This can upset certain FATE tests.
2690  // Decode only 1 frame per call on EOF to appease these FATE tests.
2691  // The ideal solution would be to rewrite decoding to use the new
2692  // decoding API in a better way.
2693  if (!pkt)
2694  break;
2695 
2696  repeating = 1;
2697  }
2698 
2699  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2700  /* except when looping we need to flush but not to send an EOF */
2701  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2702  int ret = send_filter_eof(ist);
2703  if (ret < 0) {
2704  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2705  exit_program(1);
2706  }
2707  }
2708 
2709  /* handle stream copy */
2710  if (!ist->decoding_needed && pkt) {
2711  ist->dts = ist->next_dts;
2712  switch (ist->dec_ctx->codec_type) {
2713  case AVMEDIA_TYPE_AUDIO:
2714  av_assert1(pkt->duration >= 0);
2715  if (ist->dec_ctx->sample_rate) {
2716  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2717  ist->dec_ctx->sample_rate;
2718  } else {
2720  }
2721  break;
2722  case AVMEDIA_TYPE_VIDEO:
2723  if (ist->framerate.num) {
2724  // TODO: Remove work-around for c99-to-c89 issue 7
2725  AVRational time_base_q = AV_TIME_BASE_Q;
2726  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2727  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2728  } else if (pkt->duration) {
2730  } else if(ist->dec_ctx->framerate.num != 0) {
2731  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2732  ist->next_dts += ((int64_t)AV_TIME_BASE *
2733  ist->dec_ctx->framerate.den * ticks) /
2735  }
2736  break;
2737  }
2738  ist->pts = ist->dts;
2739  ist->next_pts = ist->next_dts;
2740  }
2741  for (i = 0; i < nb_output_streams; i++) {
2743 
2744  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2745  continue;
2746 
2747  do_streamcopy(ist, ost, pkt);
2748  }
2749 
2750  return !eof_reached;
2751 }
2752 
2753 static void print_sdp(void)
2754 {
2755  char sdp[16384];
2756  int i;
2757  int j;
2758  AVIOContext *sdp_pb;
2759  AVFormatContext **avc;
2760 
2761  for (i = 0; i < nb_output_files; i++) {
2762  if (!output_files[i]->header_written)
2763  return;
2764  }
2765 
2766  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2767  if (!avc)
2768  exit_program(1);
2769  for (i = 0, j = 0; i < nb_output_files; i++) {
2770  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2771  avc[j] = output_files[i]->ctx;
2772  j++;
2773  }
2774  }
2775 
2776  if (!j)
2777  goto fail;
2778 
2779  av_sdp_create(avc, j, sdp, sizeof(sdp));
2780 
2781  if (!sdp_filename) {
2782  printf("SDP:\n%s\n", sdp);
2783  fflush(stdout);
2784  } else {
2785  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2786  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2787  } else {
2788  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2789  avio_closep(&sdp_pb);
2791  }
2792  }
2793 
2794 fail:
2795  av_freep(&avc);
2796 }
2797 
2799 {
2800  InputStream *ist = s->opaque;
2801  const enum AVPixelFormat *p;
2802  int ret;
2803 
2804  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2806  const AVCodecHWConfig *config = NULL;
2807  int i;
2808 
2809  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2810  break;
2811 
2812  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2813  ist->hwaccel_id == HWACCEL_AUTO) {
2814  for (i = 0;; i++) {
2815  config = avcodec_get_hw_config(s->codec, i);
2816  if (!config)
2817  break;
2818  if (!(config->methods &
2820  continue;
2821  if (config->pix_fmt == *p)
2822  break;
2823  }
2824  }
2825  if (config) {
2826  if (config->device_type != ist->hwaccel_device_type) {
2827  // Different hwaccel offered, ignore.
2828  continue;
2829  }
2830 
2832  if (ret < 0) {
2833  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2835  "%s hwaccel requested for input stream #%d:%d, "
2836  "but cannot be initialized.\n",
2838  ist->file_index, ist->st->index);
2839  return AV_PIX_FMT_NONE;
2840  }
2841  continue;
2842  }
2843  } else {
2844  const HWAccel *hwaccel = NULL;
2845  int i;
2846  for (i = 0; hwaccels[i].name; i++) {
2847  if (hwaccels[i].pix_fmt == *p) {
2848  hwaccel = &hwaccels[i];
2849  break;
2850  }
2851  }
2852  if (!hwaccel) {
2853  // No hwaccel supporting this pixfmt.
2854  continue;
2855  }
2856  if (hwaccel->id != ist->hwaccel_id) {
2857  // Does not match requested hwaccel.
2858  continue;
2859  }
2860 
2861  ret = hwaccel->init(s);
2862  if (ret < 0) {
2864  "%s hwaccel requested for input stream #%d:%d, "
2865  "but cannot be initialized.\n", hwaccel->name,
2866  ist->file_index, ist->st->index);
2867  return AV_PIX_FMT_NONE;
2868  }
2869  }
2870 
2871  if (ist->hw_frames_ctx) {
2872  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2873  if (!s->hw_frames_ctx)
2874  return AV_PIX_FMT_NONE;
2875  }
2876 
2877  ist->hwaccel_pix_fmt = *p;
2878  break;
2879  }
2880 
2881  return *p;
2882 }
2883 
2885 {
2886  InputStream *ist = s->opaque;
2887 
2888  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2889  return ist->hwaccel_get_buffer(s, frame, flags);
2890 
2892 }
2893 
2894 static int init_input_stream(int ist_index, char *error, int error_len)
2895 {
2896  int ret;
2897  InputStream *ist = input_streams[ist_index];
2898 
2899  if (ist->decoding_needed) {
2900  AVCodec *codec = ist->dec;
2901  if (!codec) {
2902  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2903  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2904  return AVERROR(EINVAL);
2905  }
2906 
2907  ist->dec_ctx->opaque = ist;
2908  ist->dec_ctx->get_format = get_format;
2909  ist->dec_ctx->get_buffer2 = get_buffer;
2910  ist->dec_ctx->thread_safe_callbacks = 1;
2911 
2912  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2913  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2914  (ist->decoding_needed & DECODING_FOR_OST)) {
2915  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2917  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2918  }
2919 
2920  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2921 
2922  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2923  * audio, and video decoders such as cuvid or mediacodec */
2924  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2925 
2926  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2927  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2928  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2930  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2931 
2933  if (ret < 0) {
2934  snprintf(error, error_len, "Device setup failed for "
2935  "decoder on input stream #%d:%d : %s",
2936  ist->file_index, ist->st->index, av_err2str(ret));
2937  return ret;
2938  }
2939 
2940  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2941  if (ret == AVERROR_EXPERIMENTAL)
2942  abort_codec_experimental(codec, 0);
2943 
2944  snprintf(error, error_len,
2945  "Error while opening decoder for input stream "
2946  "#%d:%d : %s",
2947  ist->file_index, ist->st->index, av_err2str(ret));
2948  return ret;
2949  }
2951  }
2952 
2953  ist->next_pts = AV_NOPTS_VALUE;
2954  ist->next_dts = AV_NOPTS_VALUE;
2955 
2956  return 0;
2957 }
2958 
2960 {
2961  if (ost->source_index >= 0)
2962  return input_streams[ost->source_index];
2963  return NULL;
2964 }
2965 
/* qsort comparator for int64_t values: negative/zero/positive for
 * less-than/equal/greater-than, without overflow-prone subtraction. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2970 
2971 /* open the muxer when all the streams are initialized */
2972 static int check_init_output_file(OutputFile *of, int file_index)
2973 {
2974  int ret, i;
2975 
2976  for (i = 0; i < of->ctx->nb_streams; i++) {
2978  if (!ost->initialized)
2979  return 0;
2980  }
2981 
2982  of->ctx->interrupt_callback = int_cb;
2983 
2984  ret = avformat_write_header(of->ctx, &of->opts);
2985  if (ret < 0) {
2987  "Could not write header for output file #%d "
2988  "(incorrect codec parameters ?): %s\n",
2989  file_index, av_err2str(ret));
2990  return ret;
2991  }
2992  //assert_avoptions(of->opts);
2993  of->header_written = 1;
2994 
2995  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2996 
2997  if (sdp_filename || want_sdp)
2998  print_sdp();
2999 
3000  /* flush the muxing queues */
3001  for (i = 0; i < of->ctx->nb_streams; i++) {
3003 
3004  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3005  if (!av_fifo_size(ost->muxing_queue))
3006  ost->mux_timebase = ost->st->time_base;
3007 
3008  while (av_fifo_size(ost->muxing_queue)) {
3009  AVPacket pkt;
3010  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3011  write_packet(of, &pkt, ost, 1);
3012  }
3013  }
3014 
3015  return 0;
3016 }
3017 
3019 {
3020  AVBSFContext *ctx;
3021  int i, ret;
3022 
3023  if (!ost->nb_bitstream_filters)
3024  return 0;
3025 
3026  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3027  ctx = ost->bsf_ctx[i];
3028 
3029  ret = avcodec_parameters_copy(ctx->par_in,
3030  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3031  if (ret < 0)
3032  return ret;
3033 
3034  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3035 
3036  ret = av_bsf_init(ctx);
3037  if (ret < 0) {
3038  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3039  ost->bsf_ctx[i]->filter->name);
3040  return ret;
3041  }
3042  }
3043 
3044  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3045  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3046  if (ret < 0)
3047  return ret;
3048 
3049  ost->st->time_base = ctx->time_base_out;
3050 
3051  return 0;
3052 }
3053 
3055 {
3056  OutputFile *of = output_files[ost->file_index];
3058  AVCodecParameters *par_dst = ost->st->codecpar;
3059  AVCodecParameters *par_src = ost->ref_par;
3060  AVRational sar;
3061  int i, ret;
3062  uint32_t codec_tag = par_dst->codec_tag;
3063 
3064  av_assert0(ist && !ost->filter);
3065 
3066  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3067  if (ret >= 0)
3068  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3069  if (ret < 0) {
3071  "Error setting up codec context options.\n");
3072  return ret;
3073  }
3074 
3075  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3076  if (ret < 0) {
3078  "Error getting reference codec parameters.\n");
3079  return ret;
3080  }
3081 
3082  if (!codec_tag) {
3083  unsigned int codec_tag_tmp;
3084  if (!of->ctx->oformat->codec_tag ||
3085  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3086  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3087  codec_tag = par_src->codec_tag;
3088  }
3089 
3090  ret = avcodec_parameters_copy(par_dst, par_src);
3091  if (ret < 0)
3092  return ret;
3093 
3094  par_dst->codec_tag = codec_tag;
3095 
3096  if (!ost->frame_rate.num)
3097  ost->frame_rate = ist->framerate;
3098  ost->st->avg_frame_rate = ost->frame_rate;
3099 
3101  if (ret < 0)
3102  return ret;
3103 
3104  // copy timebase while removing common factors
3105  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3107 
3108  // copy estimated duration as a hint to the muxer
3109  if (ost->st->duration <= 0 && ist->st->duration > 0)
3110  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3111 
3112  // copy disposition
3113  ost->st->disposition = ist->st->disposition;
3114 
3115  if (ist->st->nb_side_data) {
3116  for (i = 0; i < ist->st->nb_side_data; i++) {
3117  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3118  uint8_t *dst_data;
3119 
3120  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3121  if (!dst_data)
3122  return AVERROR(ENOMEM);
3123  memcpy(dst_data, sd_src->data, sd_src->size);
3124  }
3125  }
3126 
3127  if (ost->rotate_overridden) {
3129  sizeof(int32_t) * 9);
3130  if (sd)
3131  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3132  }
3133 
3134  switch (par_dst->codec_type) {
3135  case AVMEDIA_TYPE_AUDIO:
3136  if (audio_volume != 256) {
3137  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3138  exit_program(1);
3139  }
3140  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3141  par_dst->block_align= 0;
3142  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3143  par_dst->block_align= 0;
3144  break;
3145  case AVMEDIA_TYPE_VIDEO:
3146  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3147  sar =
3148  av_mul_q(ost->frame_aspect_ratio,
3149  (AVRational){ par_dst->height, par_dst->width });
3150  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3151  "with stream copy may produce invalid files\n");
3152  }
3153  else if (ist->st->sample_aspect_ratio.num)
3154  sar = ist->st->sample_aspect_ratio;
3155  else
3156  sar = par_src->sample_aspect_ratio;
3157  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3158  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3159  ost->st->r_frame_rate = ist->st->r_frame_rate;
3160  break;
3161  }
3162 
3163  ost->mux_timebase = ist->st->time_base;
3164 
3165  return 0;
3166 }
3167 
3169 {
3170  AVDictionaryEntry *e;
3171 
3172  uint8_t *encoder_string;
3173  int encoder_string_len;
3174  int format_flags = 0;
3175  int codec_flags = ost->enc_ctx->flags;
3176 
3177  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3178  return;
3179 
3180  e = av_dict_get(of->opts, "fflags", NULL, 0);
3181  if (e) {
3182  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3183  if (!o)
3184  return;
3185  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3186  }
3187  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3188  if (e) {
3189  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3190  if (!o)
3191  return;
3192  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3193  }
3194 
3195  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3196  encoder_string = av_mallocz(encoder_string_len);
3197  if (!encoder_string)
3198  exit_program(1);
3199 
3200  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3201  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3202  else
3203  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3204  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3205  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3207 }
3208 
3210  AVCodecContext *avctx)
3211 {
3212  char *p;
3213  int n = 1, i, size, index = 0;
3214  int64_t t, *pts;
3215 
3216  for (p = kf; *p; p++)
3217  if (*p == ',')
3218  n++;
3219  size = n;
3220  pts = av_malloc_array(size, sizeof(*pts));
3221  if (!pts) {
3222  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3223  exit_program(1);
3224  }
3225 
3226  p = kf;
3227  for (i = 0; i < n; i++) {
3228  char *next = strchr(p, ',');
3229 
3230  if (next)
3231  *next++ = 0;
3232 
3233  if (!memcmp(p, "chapters", 8)) {
3234 
3235  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3236  int j;
3237 
3238  if (avf->nb_chapters > INT_MAX - size ||
3239  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3240  sizeof(*pts)))) {
3242  "Could not allocate forced key frames array.\n");
3243  exit_program(1);
3244  }
3245  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3246  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3247 
3248  for (j = 0; j < avf->nb_chapters; j++) {
3249  AVChapter *c = avf->chapters[j];
3250  av_assert1(index < size);
3251  pts[index++] = av_rescale_q(c->start, c->time_base,
3252  avctx->time_base) + t;
3253  }
3254 
3255  } else {
3256 
3257  t = parse_time_or_die("force_key_frames", p, 1);
3258  av_assert1(index < size);
3259  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3260 
3261  }
3262 
3263  p = next;
3264  }
3265 
3266  av_assert0(index == size);
3267  qsort(pts, size, sizeof(*pts), compare_int64);
3268  ost->forced_kf_count = size;
3269  ost->forced_kf_pts = pts;
3270 }
3271 
3272 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3273 {
3275  AVCodecContext *enc_ctx = ost->enc_ctx;
3276  AVFormatContext *oc;
3277 
3278  if (ost->enc_timebase.num > 0) {
3279  enc_ctx->time_base = ost->enc_timebase;
3280  return;
3281  }
3282 
3283  if (ost->enc_timebase.num < 0) {
3284  if (ist) {
3285  enc_ctx->time_base = ist->st->time_base;
3286  return;
3287  }
3288 
3289  oc = output_files[ost->file_index]->ctx;
3290  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3291  }
3292 
3293  enc_ctx->time_base = default_time_base;
3294 }
3295 
3297 {
3299  AVCodecContext *enc_ctx = ost->enc_ctx;
3301  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3302  int j, ret;
3303 
3304  set_encoder_id(output_files[ost->file_index], ost);
3305 
3306  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3307  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3308  // which have to be filtered out to prevent leaking them to output files.
3309  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3310 
3311  if (ist) {
3312  ost->st->disposition = ist->st->disposition;
3313 
3314  dec_ctx = ist->dec_ctx;
3315 
3317  } else {
3318  for (j = 0; j < oc->nb_streams; j++) {
3319  AVStream *st = oc->streams[j];
3320  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3321  break;
3322  }
3323  if (j == oc->nb_streams)
3324  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3327  }
3328 
3329  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3330  if (!ost->frame_rate.num)
3331  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3332  if (ist && !ost->frame_rate.num)
3333  ost->frame_rate = ist->framerate;
3334  if (ist && !ost->frame_rate.num)
3335  ost->frame_rate = ist->st->r_frame_rate;
3336  if (ist && !ost->frame_rate.num) {
3337  ost->frame_rate = (AVRational){25, 1};
3339  "No information "
3340  "about the input framerate is available. Falling "
3341  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3342  "if you want a different framerate.\n",
3343  ost->file_index, ost->index);
3344  }
3345 
3346  if (ost->enc->supported_framerates && !ost->force_fps) {
3347  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3348  ost->frame_rate = ost->enc->supported_framerates[idx];
3349  }
3350  // reduce frame rate for mpeg4 to be within the spec limits
3351  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3352  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3353  ost->frame_rate.num, ost->frame_rate.den, 65535);
3354  }
3355  }
3356 
3357  switch (enc_ctx->codec_type) {
3358  case AVMEDIA_TYPE_AUDIO:
3359  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3360  if (dec_ctx)
3362  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3363  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3364  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3365  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3366 
3368  break;
3369 
3370  case AVMEDIA_TYPE_VIDEO:
3371  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3372 
3373  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3374  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3375  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3377  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3378  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3379  }
3380  for (j = 0; j < ost->forced_kf_count; j++)
3381  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3383  enc_ctx->time_base);
3384 
3385  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3386  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3387  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3388  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3389  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3390  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3391 
3392  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3393  if (dec_ctx)
3395  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3396 
3397  enc_ctx->framerate = ost->frame_rate;
3398 
3399  ost->st->avg_frame_rate = ost->frame_rate;
3400 
3401  if (!dec_ctx ||
3402  enc_ctx->width != dec_ctx->width ||
3403  enc_ctx->height != dec_ctx->height ||
3404  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3406  }
3407 
3408  if (ost->top_field_first == 0) {
3409  enc_ctx->field_order = AV_FIELD_BB;
3410  } else if (ost->top_field_first == 1) {
3411  enc_ctx->field_order = AV_FIELD_TT;
3412  }
3413 
3414  if (ost->forced_keyframes) {
3415  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3416  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3418  if (ret < 0) {
3420  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3421  return ret;
3422  }
3423  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3424  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3425  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3426  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3427 
3428  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3429  // parse it only for static kf timings
3430  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3431  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3432  }
3433  }
3434  break;
3435  case AVMEDIA_TYPE_SUBTITLE:
3436  enc_ctx->time_base = AV_TIME_BASE_Q;
3437  if (!enc_ctx->width) {
3438  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3439  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3440  }
3441  break;
3442  case AVMEDIA_TYPE_DATA:
3443  break;
3444  default:
3445  abort();
3446  break;
3447  }
3448 
3449  ost->mux_timebase = enc_ctx->time_base;
3450 
3451  return 0;
3452 }
3453 
3454 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3455 {
3456  int ret = 0;
3457 
3458  if (ost->encoding_needed) {
3459  AVCodec *codec = ost->enc;
3460  AVCodecContext *dec = NULL;
3461  InputStream *ist;
3462 
3464  if (ret < 0)
3465  return ret;
3466 
3467  if ((ist = get_input_stream(ost)))
3468  dec = ist->dec_ctx;
3469  if (dec && dec->subtitle_header) {
3470  /* ASS code assumes this buffer is null terminated so add extra byte. */
3471  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3472  if (!ost->enc_ctx->subtitle_header)
3473  return AVERROR(ENOMEM);
3474  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3475  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3476  }
3477  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3478  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3479  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3480  !codec->defaults &&
3481  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3482  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3483  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3484 
3485  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3486  ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3487  av_buffersink_get_format(ost->filter->filter)) {
3488  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3489  if (!ost->enc_ctx->hw_frames_ctx)
3490  return AVERROR(ENOMEM);
3491  } else {
3493  if (ret < 0) {
3494  snprintf(error, error_len, "Device setup failed for "
3495  "encoder on output stream #%d:%d : %s",
3496  ost->file_index, ost->index, av_err2str(ret));
3497  return ret;
3498  }
3499  }
3500  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3501  int input_props = 0, output_props = 0;
3502  AVCodecDescriptor const *input_descriptor =
3504  AVCodecDescriptor const *output_descriptor =
3505  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3506  if (input_descriptor)
3507  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3508  if (output_descriptor)
3509  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3510  if (input_props && output_props && input_props != output_props) {
3511  snprintf(error, error_len,
3512  "Subtitle encoding currently only possible from text to text "
3513  "or bitmap to bitmap");
3514  return AVERROR_INVALIDDATA;
3515  }
3516  }
3517 
3518  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3519  if (ret == AVERROR_EXPERIMENTAL)
3520  abort_codec_experimental(codec, 1);
3521  snprintf(error, error_len,
3522  "Error while opening encoder for output stream #%d:%d - "
3523  "maybe incorrect parameters such as bit_rate, rate, width or height",
3524  ost->file_index, ost->index);
3525  return ret;
3526  }
3527  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3528  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3529  av_buffersink_set_frame_size(ost->filter->filter,
3530  ost->enc_ctx->frame_size);
3531  assert_avoptions(ost->encoder_opts);
3532  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3533  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3534  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3535  " It takes bits/s as argument, not kbits/s\n");
3536 
3538  if (ret < 0) {
3540  "Error initializing the output stream codec context.\n");
3541  exit_program(1);
3542  }
3543  /*
3544  * FIXME: ost->st->codec should't be needed here anymore.
3545  */
3546  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3547  if (ret < 0)
3548  return ret;
3549 
3550  if (ost->enc_ctx->nb_coded_side_data) {
3551  int i;
3552 
3553  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3554  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3555  uint8_t *dst_data;
3556 
3557  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3558  if (!dst_data)
3559  return AVERROR(ENOMEM);
3560  memcpy(dst_data, sd_src->data, sd_src->size);
3561  }
3562  }
3563 
3564  /*
3565  * Add global input side data. For now this is naive, and copies it
3566  * from the input stream's global side data. All side data should
3567  * really be funneled over AVFrame and libavfilter, then added back to
3568  * packet side data, and then potentially using the first packet for
3569  * global side data.
3570  */
3571  if (ist) {
3572  int i;
3573  for (i = 0; i < ist->st->nb_side_data; i++) {
3574  AVPacketSideData *sd = &ist->st->side_data[i];
3575  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3576  if (!dst)
3577  return AVERROR(ENOMEM);
3578  memcpy(dst, sd->data, sd->size);
3579  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3580  av_display_rotation_set((uint32_t *)dst, 0);
3581  }
3582  }
3583 
3584  // copy timebase while removing common factors
3585  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3586  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3587 
3588  // copy estimated duration as a hint to the muxer
3589  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3590  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3591 
3592  ost->st->codec->codec= ost->enc_ctx->codec;
3593  } else if (ost->stream_copy) {
3595  if (ret < 0)
3596  return ret;
3597  }
3598 
3599  // parse user provided disposition, and update stream values
3600  if (ost->disposition) {
3601  static const AVOption opts[] = {
3602  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3603  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3604  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3605  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3606  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3607  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3608  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3609  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3610  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3611  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3612  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3613  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3614  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3615  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3616  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3617  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3618  { NULL },
3619  };
3620  static const AVClass class = {
3621  .class_name = "",
3622  .item_name = av_default_item_name,
3623  .option = opts,
3624  .version = LIBAVUTIL_VERSION_INT,
3625  };
3626  const AVClass *pclass = &class;
3627 
3628  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3629  if (ret < 0)
3630  return ret;
3631  }
3632 
3633  /* initialize bitstream filters for the output stream
3634  * needs to be done here, because the codec id for streamcopy is not
3635  * known until now */
3637  if (ret < 0)
3638  return ret;
3639 
3640  ost->initialized = 1;
3641 
3642  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3643  if (ret < 0)
3644  return ret;
3645 
3646  return ret;
3647 }
3648 
3649 static void report_new_stream(int input_index, AVPacket *pkt)
3650 {
3651  InputFile *file = input_files[input_index];
3652  AVStream *st = file->ctx->streams[pkt->stream_index];
3653 
3654  if (pkt->stream_index < file->nb_streams_warn)
3655  return;
3656  av_log(file->ctx, AV_LOG_WARNING,
3657  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3659  input_index, pkt->stream_index,
3660  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3661  file->nb_streams_warn = pkt->stream_index + 1;
3662 }
3663 
3664 static int transcode_init(void)
3665 {
3666  int ret = 0, i, j, k;
3667  AVFormatContext *oc;
3668  OutputStream *ost;
3669  InputStream *ist;
3670  char error[1024] = {0};
3671 
3672  for (i = 0; i < nb_filtergraphs; i++) {
3673  FilterGraph *fg = filtergraphs[i];
3674  for (j = 0; j < fg->nb_outputs; j++) {
3675  OutputFilter *ofilter = fg->outputs[j];
3676  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3677  continue;
3678  if (fg->nb_inputs != 1)
3679  continue;
3680  for (k = nb_input_streams-1; k >= 0 ; k--)
3681  if (fg->inputs[0]->ist == input_streams[k])
3682  break;
3683  ofilter->ost->source_index = k;
3684  }
3685  }
3686 
3687  /* init framerate emulation */
3688  for (i = 0; i < nb_input_files; i++) {
3690  if (ifile->rate_emu)
3691  for (j = 0; j < ifile->nb_streams; j++)
3692  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3693  }
3694 
3695  /* init input streams */
3696  for (i = 0; i < nb_input_streams; i++)
3697  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3698  for (i = 0; i < nb_output_streams; i++) {
3699  ost = output_streams[i];
3700  avcodec_close(ost->enc_ctx);
3701  }
3702  goto dump_format;
3703  }
3704 
3705  /* open each encoder */
3706  for (i = 0; i < nb_output_streams; i++) {
3707  // skip streams fed from filtergraphs until we have a frame for them
3708  if (output_streams[i]->filter)
3709  continue;
3710 
3712  if (ret < 0)
3713  goto dump_format;
3714  }
3715 
3716  /* discard unused programs */
3717  for (i = 0; i < nb_input_files; i++) {
3719  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3720  AVProgram *p = ifile->ctx->programs[j];
3721  int discard = AVDISCARD_ALL;
3722 
3723  for (k = 0; k < p->nb_stream_indexes; k++)
3724  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3725  discard = AVDISCARD_DEFAULT;
3726  break;
3727  }
3728  p->discard = discard;
3729  }
3730  }
3731 
3732  /* write headers for files with no streams */
3733  for (i = 0; i < nb_output_files; i++) {
3734  oc = output_files[i]->ctx;
3735  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3737  if (ret < 0)
3738  goto dump_format;
3739  }
3740  }
3741 
3742  dump_format:
3743  /* dump the stream mapping */
3744  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3745  for (i = 0; i < nb_input_streams; i++) {
3746  ist = input_streams[i];
3747 
3748  for (j = 0; j < ist->nb_filters; j++) {
3749  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3750  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3751  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3752  ist->filters[j]->name);
3753  if (nb_filtergraphs > 1)
3754  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3755  av_log(NULL, AV_LOG_INFO, "\n");
3756  }
3757  }
3758  }
3759 
3760  for (i = 0; i < nb_output_streams; i++) {
3761  ost = output_streams[i];
3762 
3763  if (ost->attachment_filename) {
3764  /* an attached file */
3765  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3766  ost->attachment_filename, ost->file_index, ost->index);
3767  continue;
3768  }
3769 
3770  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3771  /* output from a complex graph */
3772  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3773  if (nb_filtergraphs > 1)
3774  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3775 
3776  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3777  ost->index, ost->enc ? ost->enc->name : "?");
3778  continue;
3779  }
3780 
3781  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3782  input_streams[ost->source_index]->file_index,
3783  input_streams[ost->source_index]->st->index,
3784  ost->file_index,
3785  ost->index);
3786  if (ost->sync_ist != input_streams[ost->source_index])
3787  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3788  ost->sync_ist->file_index,
3789  ost->sync_ist->st->index);
3790  if (ost->stream_copy)
3791  av_log(NULL, AV_LOG_INFO, " (copy)");
3792  else {
3793  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3794  const AVCodec *out_codec = ost->enc;
3795  const char *decoder_name = "?";
3796  const char *in_codec_name = "?";
3797  const char *encoder_name = "?";
3798  const char *out_codec_name = "?";
3799  const AVCodecDescriptor *desc;
3800 
3801  if (in_codec) {
3802  decoder_name = in_codec->name;
3803  desc = avcodec_descriptor_get(in_codec->id);
3804  if (desc)
3805  in_codec_name = desc->name;
3806  if (!strcmp(decoder_name, in_codec_name))
3807  decoder_name = "native";
3808  }
3809 
3810  if (out_codec) {
3811  encoder_name = out_codec->name;
3812  desc = avcodec_descriptor_get(out_codec->id);
3813  if (desc)
3814  out_codec_name = desc->name;
3815  if (!strcmp(encoder_name, out_codec_name))
3816  encoder_name = "native";
3817  }
3818 
3819  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3820  in_codec_name, decoder_name,
3821  out_codec_name, encoder_name);
3822  }
3823  av_log(NULL, AV_LOG_INFO, "\n");
3824  }
3825 
3826  if (ret) {
3827  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3828  return ret;
3829  }
3830 
3832 
3833  return 0;
3834 }
3835 
3836 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3837 static int need_output(void)
3838 {
3839  int i;
3840 
3841  for (i = 0; i < nb_output_streams; i++) {
3843  OutputFile *of = output_files[ost->file_index];
3844  AVFormatContext *os = output_files[ost->file_index]->ctx;
3845 
3846  if (ost->finished ||
3847  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3848  continue;
3849  if (ost->frame_number >= ost->max_frames) {
3850  int j;
3851  for (j = 0; j < of->ctx->nb_streams; j++)
3853  continue;
3854  }
3855 
3856  return 1;
3857  }
3858 
3859  return 0;
3860 }
3861 
3862 /**
3863  * Select the output stream to process.
3864  *
3865  * @return selected output stream, or NULL if none available
3866  */
3868 {
3869  int i;
3870  int64_t opts_min = INT64_MAX;
3871  OutputStream *ost_min = NULL;
3872 
3873  for (i = 0; i < nb_output_streams; i++) {
3875  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3876  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3877  AV_TIME_BASE_Q);
3878  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3880  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3881  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3882 
3883  if (!ost->initialized && !ost->inputs_done)
3884  return ost;
3885 
3886  if (!ost->finished && opts < opts_min) {
3887  opts_min = opts;
3888  ost_min = ost->unavailable ? NULL : ost;
3889  }
3890  }
3891  return ost_min;
3892 }
3893 
/* Enable or disable terminal echo on stdin; best-effort no-op where
 * termios is unavailable or stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3905 
3906 static int check_keyboard_interaction(int64_t cur_time)
3907 {
3908  int i, ret, key;
3909  static int64_t last_time;
3910  if (received_nb_signals)
3911  return AVERROR_EXIT;
3912  /* read_key() returns 0 on EOF */
3913  if(cur_time - last_time >= 100000 && !run_as_daemon){
3914  key = read_key();
3915  last_time = cur_time;
3916  }else
3917  key = -1;
3918  if (key == 'q')
3919  return AVERROR_EXIT;
3920  if (key == '+') av_log_set_level(av_log_get_level()+10);
3921  if (key == '-') av_log_set_level(av_log_get_level()-10);
3922  if (key == 's') qp_hist ^= 1;
3923  if (key == 'h'){
3924  if (do_hex_dump){
3925  do_hex_dump = do_pkt_dump = 0;
3926  } else if(do_pkt_dump){
3927  do_hex_dump = 1;
3928  } else
3929  do_pkt_dump = 1;
3931  }
3932  if (key == 'c' || key == 'C'){
3933  char buf[4096], target[64], command[256], arg[256] = {0};
3934  double time;
3935  int k, n = 0;
3936  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3937  i = 0;
3938  set_tty_echo(1);
3939  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3940  if (k > 0)
3941  buf[i++] = k;
3942  buf[i] = 0;
3943  set_tty_echo(0);
3944  fprintf(stderr, "\n");
3945  if (k > 0 &&
3946  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3947  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3948  target, time, command, arg);
3949  for (i = 0; i < nb_filtergraphs; i++) {
3950  FilterGraph *fg = filtergraphs[i];
3951  if (fg->graph) {
3952  if (time < 0) {
3953  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3954  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3955  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3956  } else if (key == 'c') {
3957  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3959  } else {
3960  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3961  if (ret < 0)
3962  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3963  }
3964  }
3965  }
3966  } else {
3968  "Parse error, at least 3 arguments were expected, "
3969  "only %d given in string '%s'\n", n, buf);
3970  }
3971  }
3972  if (key == 'd' || key == 'D'){
3973  int debug=0;
3974  if(key == 'D') {
3975  debug = input_streams[0]->st->codec->debug<<1;
3976  if(!debug) debug = 1;
3977  while(debug & (FF_DEBUG_DCT_COEFF
3978 #if FF_API_DEBUG_MV
3979  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3980 #endif
3981  )) //unsupported, would just crash
3982  debug += debug;
3983  }else{
3984  char buf[32];
3985  int k = 0;
3986  i = 0;
3987  set_tty_echo(1);
3988  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3989  if (k > 0)
3990  buf[i++] = k;
3991  buf[i] = 0;
3992  set_tty_echo(0);
3993  fprintf(stderr, "\n");
3994  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3995  fprintf(stderr,"error parsing debug value\n");
3996  }
3997  for(i=0;i<nb_input_streams;i++) {
3998  input_streams[i]->st->codec->debug = debug;
3999  }
4000  for(i=0;i<nb_output_streams;i++) {
4002  ost->enc_ctx->debug = debug;
4003  }
4004  if(debug) av_log_set_level(AV_LOG_DEBUG);
4005  fprintf(stderr,"debug=%d\n", debug);
4006  }
4007  if (key == '?'){
4008  fprintf(stderr, "key function\n"
4009  "? show this help\n"
4010  "+ increase verbosity\n"
4011  "- decrease verbosity\n"
4012  "c Send command to first matching filter supporting it\n"
4013  "C Send/Queue command to all matching filters\n"
4014  "D cycle through available debug modes\n"
4015  "h dump packets/hex press to cycle through the 3 states\n"
4016  "q quit\n"
4017  "s Show QP histogram\n"
4018  );
4019  }
4020  return 0;
4021 }
4022 
4023 #if HAVE_THREADS
4024 static void *input_thread(void *arg)
4025 {
4026  InputFile *f = arg;
4027  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4028  int ret = 0;
4029 
4030  while (1) {
4031  AVPacket pkt;
4032  ret = av_read_frame(f->ctx, &pkt);
4033 
4034  if (ret == AVERROR(EAGAIN)) {
4035  av_usleep(10000);
4036  continue;
4037  }
4038  if (ret < 0) {
4039  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4040  break;
4041  }
4042  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4043  if (flags && ret == AVERROR(EAGAIN)) {
4044  flags = 0;
4045  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4046  av_log(f->ctx, AV_LOG_WARNING,
4047  "Thread message queue blocking; consider raising the "
4048  "thread_queue_size option (current value: %d)\n",
4049  f->thread_queue_size);
4050  }
4051  if (ret < 0) {
4052  if (ret != AVERROR_EOF)
4053  av_log(f->ctx, AV_LOG_ERROR,
4054  "Unable to send packet to main thread: %s\n",
4055  av_err2str(ret));
4056  av_packet_unref(&pkt);
4057  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4058  break;
4059  }
4060  }
4061 
4062  return NULL;
4063 }
4064 
4065 static void free_input_thread(int i)
4066 {
4067  InputFile *f = input_files[i];
4068  AVPacket pkt;
4069 
4070  if (!f || !f->in_thread_queue)
4071  return;
4073  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4074  av_packet_unref(&pkt);
4075 
4076  pthread_join(f->thread, NULL);
4077  f->joined = 1;
4078  av_thread_message_queue_free(&f->in_thread_queue);
4079 }
4080 
4081 static void free_input_threads(void)
4082 {
4083  int i;
4084 
4085  for (i = 0; i < nb_input_files; i++)
4086  free_input_thread(i);
4087 }
4088 
4089 static int init_input_thread(int i)
4090 {
4091  int ret;
4092  InputFile *f = input_files[i];
4093 
4094  if (nb_input_files == 1)
4095  return 0;
4096 
4097  if (f->ctx->pb ? !f->ctx->pb->seekable :
4098  strcmp(f->ctx->iformat->name, "lavfi"))
4099  f->non_blocking = 1;
4100  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4101  f->thread_queue_size, sizeof(AVPacket));
4102  if (ret < 0)
4103  return ret;
4104 
4105  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4106  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4107  av_thread_message_queue_free(&f->in_thread_queue);
4108  return AVERROR(ret);
4109  }
4110 
4111  return 0;
4112 }
4113 
4114 static int init_input_threads(void)
4115 {
4116  int i, ret;
4117 
4118  for (i = 0; i < nb_input_files; i++) {
4119  ret = init_input_thread(i);
4120  if (ret < 0)
4121  return ret;
4122  }
4123  return 0;
4124 }
4125 
4126 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4127 {
4128  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4129  f->non_blocking ?
4131 }
4132 #endif
4133 
4135 {
4136  if (f->rate_emu) {
4137  int i;
4138  for (i = 0; i < f->nb_streams; i++) {
4139  InputStream *ist = input_streams[f->ist_index + i];
4140  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4141  int64_t now = av_gettime_relative() - ist->start;
4142  if (pts > now)
4143  return AVERROR(EAGAIN);
4144  }
4145  }
4146 
4147 #if HAVE_THREADS
4148  if (nb_input_files > 1)
4149  return get_input_packet_mt(f, pkt);
4150 #endif
4151  return av_read_frame(f->ctx, pkt);
4152 }
4153 
4154 static int got_eagain(void)
4155 {
4156  int i;
4157  for (i = 0; i < nb_output_streams; i++)
4158  if (output_streams[i]->unavailable)
4159  return 1;
4160  return 0;
4161 }
4162 
4163 static void reset_eagain(void)
4164 {
4165  int i;
4166  for (i = 0; i < nb_input_files; i++)
4167  input_files[i]->eagain = 0;
4168  for (i = 0; i < nb_output_streams; i++)
4169  output_streams[i]->unavailable = 0;
4170 }
4171 
4172 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4173 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4174  AVRational time_base)
4175 {
4176  int ret;
4177 
4178  if (!*duration) {
4179  *duration = tmp;
4180  return tmp_time_base;
4181  }
4182 
4183  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4184  if (ret < 0) {
4185  *duration = tmp;
4186  return tmp_time_base;
4187  }
4188 
4189  return time_base;
4190 }
4191 
4193 {
4194  InputStream *ist;
4195  AVCodecContext *avctx;
4196  int i, ret, has_audio = 0;
4197  int64_t duration = 0;
4198 
4199  ret = av_seek_frame(is, -1, is->start_time, 0);
4200  if (ret < 0)
4201  return ret;
4202 
4203  for (i = 0; i < ifile->nb_streams; i++) {
4204  ist = input_streams[ifile->ist_index + i];
4205  avctx = ist->dec_ctx;
4206 
4207  /* duration is the length of the last frame in a stream
4208  * when audio stream is present we don't care about
4209  * last video frame length because it's not defined exactly */
4210  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4211  has_audio = 1;
4212  }
4213 
4214  for (i = 0; i < ifile->nb_streams; i++) {
4215  ist = input_streams[ifile->ist_index + i];
4216  avctx = ist->dec_ctx;
4217 
4218  if (has_audio) {
4219  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4220  AVRational sample_rate = {1, avctx->sample_rate};
4221 
4223