FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination for per-frame video statistics when -vstats is in use. */
112 static FILE *vstats_file;
113 
/* Names of the variables usable inside -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations; definitions live further down this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
/* Global transcoding state and counters. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
/* NOTE(review): update_benchmark() assigns an int64_t into this and prints
   the int64 difference; upstream declares it int64_t — verify the type. */
132 static int current_time;
134 
136 
141 
146 
/* NOTE(review): listing lines 133-149 were lost in extraction. */
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
165 
/* NOTE(review): the signature (listing line 166) was lost in extraction;
   from the body this is the sub2video helper that reallocates a blank
   canvas frame for the input stream and returns 0 or a negative
   AVERROR code from av_frame_get_buffer(). */
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to the configured sub2video size. */
172  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
/* NOTE(review): listing line 174 was lost in extraction — presumably it sets
   frame->format; confirm upstream. */
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
/* Zero the first plane so the canvas starts out blank. */
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194  r->x, r->y, r->w, r->h, w, h
195  );
196  return;
197  }
198 
199  dst += r->y * dst_linesize + r->x * 4;
200  src = r->data[0];
201  pal = (uint32_t *)r->data[1];
202  for (y = 0; y < r->h; y++) {
203  dst2 = (uint32_t *)dst;
204  src2 = src;
205  for (x = 0; x < r->w; x++)
206  *(dst2++) = pal[*(src2++)];
207  dst += dst_linesize;
208  src += r->linesize[0];
209  }
210 }
211 
/* Stamp the sub2video canvas frame with the given pts, remember it as the
   last pts sent, and hand the frame to the stream's input filters. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214  AVFrame *frame = ist->sub2video.frame;
215  int i;
216 
217  av_assert1(frame->data[0]);
218  ist->sub2video.last_pts = frame->pts = pts;
219  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (listing lines 220-222) was lost in extraction;
   presumably it pushes the frame to each filter's buffer source — confirm
   upstream. */
223 }
224 
/* Render the given subtitle (or a blank canvas when sub == NULL) onto the
   sub2video frame and push it into the filter graph; records the pts at
   which the current contents stop being valid in sub2video.end_pts. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227  AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): frame->data[0] is uint8_t* and sub2video_copy_rect() takes
   uint8_t*; int8_t here is almost certainly an extraction typo for uint8_t
   — confirm against upstream. */
228  int8_t *dst;
229  int dst_linesize;
230  int num_rects, i;
231  int64_t pts, end_pts;
232 
233  if (!frame)
234  return;
235  if (sub) {
/* Convert the subtitle's display window (ms offsets on an AV_TIME_BASE pts)
   into the input stream's time base. */
236  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  num_rects = sub->num_rects;
241  } else {
/* No subtitle: emit an empty canvas valid from the previous end onwards. */
242  pts = ist->sub2video.end_pts;
243  end_pts = INT64_MAX;
244  num_rects = 0;
245  }
246  if (sub2video_get_blank_frame(ist) < 0) {
/* NOTE(review): the opening of this av_log() call (listing line 247) was
   lost in extraction. */
248  "Impossible to get a blank canvas.\n");
249  return;
250  }
251  dst = frame->data [0];
252  dst_linesize = frame->linesize[0];
253  for (i = 0; i < num_rects; i++)
254  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255  sub2video_push_ref(ist, pts);
256  ist->sub2video.end_pts = end_pts;
257 }
258 
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261  InputFile *infile = input_files[ist->file_index];
262  int i, j, nb_reqs;
263  int64_t pts2;
264 
265  /* When a frame is read from a file, examine all sub2video streams in
266  the same file and send the sub2video frame again. Otherwise, decoded
267  video frames could be accumulating in the filter graph while a filter
268  (possibly overlay) is desperately waiting for a subtitle frame. */
269  for (i = 0; i < infile->nb_streams; i++) {
270  InputStream *ist2 = input_streams[infile->ist_index + i];
271  if (!ist2->sub2video.frame)
272  continue;
273  /* subtitles seem to be usually muxed ahead of other streams;
274  if not, subtracting a larger time here is necessary */
275  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276  /* do not send the heartbeat frame if the subtitle is already ahead */
277  if (pts2 <= ist2->sub2video.last_pts)
278  continue;
279  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280  sub2video_update(ist2, NULL);
281  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283  if (nb_reqs)
284  sub2video_push_ref(ist2, pts2);
285  }
286 }
287 
/* Flush a sub2video stream at end of input: push a final blank canvas if
   the last one has not already been extended to INT64_MAX. */
288 static void sub2video_flush(InputStream *ist)
289 {
290  int i;
291 
292  if (ist->sub2video.end_pts < INT64_MAX)
293  sub2video_update(ist, NULL);
294  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (listing line 295) was lost in extraction;
   presumably it signals EOF to each input filter — confirm upstream. */
296 }
297 
298 /* end of sub2video hack */
299 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init(), if they were modified. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
307 
/* Public terminal-restore entry point, called at program exit. */
308 void term_exit(void)
309 {
310  av_log(NULL, AV_LOG_QUIET, "%s", "");
/* NOTE(review): listing line 311 was lost in extraction; presumably it calls
   term_exit_sigsafe() — confirm upstream. */
312 }
313 
/* Signal/exit bookkeeping shared between the signal handlers and the main
   transcoding loop; volatile because they are written from handler context. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
319 
/* Signal handler: record which signal arrived; after repeated signals,
   hard-exit so a stuck process can still be stopped from the keyboard. */
320 static void
/* NOTE(review): the parameter list (listing line 321) and body lines 324-325
   were lost in extraction — confirm upstream. */
322 {
323  received_sigterm = sig;
326  if(received_nb_signals > 3) {
/* write(2) is used here rather than stdio because it is async-signal-safe;
   its return value is left unchecked. */
327  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328  strlen("Received > 3 system signals, hard exiting\n"));
329 
330  exit(123);
331  }
332 }
333 
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338 
339  switch (fdwCtrlType)
340  {
341  case CTRL_C_EVENT:
342  case CTRL_BREAK_EVENT:
343  sigterm_handler(SIGINT);
344  return TRUE;
345 
346  case CTRL_CLOSE_EVENT:
347  case CTRL_LOGOFF_EVENT:
348  case CTRL_SHUTDOWN_EVENT:
349  sigterm_handler(SIGTERM);
350  /* Basically, with these 3 events, when we return from this method the
351  process is hard terminated, so stall as long as we need to
352  to try and let the main thread(s) clean up and gracefully terminate
353  (we have at most 5 seconds, but should be done far before that). */
354  while (!ffmpeg_exited) {
355  Sleep(0);
356  }
357  return TRUE;
358 
359  default:
360  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361  return FALSE;
362  }
363 }
364 #endif
365 
/* Set up terminal raw-ish input (for interactive key handling) and install
   the termination signal handlers. */
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369  if(!run_as_daemon){
370  struct termios tty;
371  if (tcgetattr (0, &tty) == 0) {
/* Save the current settings so term_exit_sigsafe() can restore them. */
372  oldtty = tty;
373  restore_tty = 1;
374 
/* Disable canonical mode, echo and input translation so single key
   presses can be read by read_key(). */
375  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376  |INLCR|IGNCR|ICRNL|IXON);
377  tty.c_oflag |= OPOST;
378  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379  tty.c_cflag &= ~(CSIZE|PARENB);
380  tty.c_cflag |= CS8;
381  tty.c_cc[VMIN] = 1;
382  tty.c_cc[VTIME] = 0;
383 
384  tcsetattr (0, TCSANOW, &tty);
385  }
386  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387  }
388 #endif
389 
390  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393  signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399 
400 /* read a key without blocking */
/* Poll for a single keypress without blocking. Returns the character read,
   or a negative value / 0 when no input is available. Uses select()+read()
   on termios systems and kbhit()/PeekNamedPipe on Windows. */
401 static int read_key(void)
402 {
403  unsigned char ch;
404 #if HAVE_TERMIOS_H
405  int n = 1;
406  struct timeval tv;
407  fd_set rfds;
408 
/* Zero-timeout select on stdin: pure poll, never blocks. */
409  FD_ZERO(&rfds);
410  FD_SET(0, &rfds);
411  tv.tv_sec = 0;
412  tv.tv_usec = 0;
413  n = select(1, &rfds, NULL, NULL, &tv);
414  if (n > 0) {
415  n = read(0, &ch, 1);
416  if (n == 1)
417  return ch;
418 
419  return n;
420  }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423  static int is_pipe;
424  static HANDLE input_handle;
425  DWORD dw, nchars;
/* Lazily determine whether stdin is a console or a pipe (GUI launch). */
426  if(!input_handle){
427  input_handle = GetStdHandle(STD_INPUT_HANDLE);
428  is_pipe = !GetConsoleMode(input_handle, &dw);
429  }
430 
431  if (is_pipe) {
432  /* When running under a GUI, you will end here. */
433  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434  // input pipe may have been closed by the program that ran ffmpeg
435  return -1;
436  }
437  //Read it
438  if(nchars != 0) {
439  read(0, &ch, 1);
440  return ch;
441  }else{
442  return -1;
443  }
444  }
445 # endif
446  if(kbhit())
447  return(getch());
448 #endif
449  return -1;
450 }
451 
/* AVIOInterruptCallback hook: a non-zero return aborts blocking I/O. */
452 static int decode_interrupt_cb(void *ctx)
453 {
/* NOTE(review): the return statement (listing line 454) was lost in
   extraction; presumably it is based on received_nb_signals — confirm
   upstream. */
455 }
456 
458 
/* Final cleanup callback: free filtergraphs, output files/streams, input
   files/streams, close the vstats file, restore the terminal, and report
   the exit reason. `ret` is the pending program exit code.
   NOTE(review): several listing lines (470, 487, 492, 498, 505, 513, 517,
   523, 527, 532, 545-550, 554, 561, 565, 574) were lost in extraction. */
459 static void ffmpeg_cleanup(int ret)
460 {
461  int i, j;
462 
463  if (do_benchmark) {
464  int maxrss = getmaxrss() / 1024;
465  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466  }
467 
/* Free every filtergraph and its input/output descriptions. */
468  for (i = 0; i < nb_filtergraphs; i++) {
469  FilterGraph *fg = filtergraphs[i];
471  for (j = 0; j < fg->nb_inputs; j++) {
472  av_freep(&fg->inputs[j]->name);
473  av_freep(&fg->inputs[j]);
474  }
475  av_freep(&fg->inputs);
476  for (j = 0; j < fg->nb_outputs; j++) {
477  av_freep(&fg->outputs[j]->name);
478  av_freep(&fg->outputs[j]);
479  }
480  av_freep(&fg->outputs);
481  av_freep(&fg->graph_desc);
482 
483  av_freep(&filtergraphs[i]);
484  }
485  av_freep(&filtergraphs);
486 
488 
489  /* close files */
490  for (i = 0; i < nb_output_files; i++) {
491  OutputFile *of = output_files[i];
493  if (!of)
494  continue;
495  s = of->ctx;
496  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497  avio_closep(&s->pb);
499  av_dict_free(&of->opts);
500 
501  av_freep(&output_files[i]);
502  }
/* Tear down each output stream: bitstream filter chain, cached frame,
   parser, and assorted per-stream allocations. */
503  for (i = 0; i < nb_output_streams; i++) {
504  OutputStream *ost = output_streams[i];
506 
507  if (!ost)
508  continue;
509 
510  bsfc = ost->bitstream_filters;
511  while (bsfc) {
512  AVBitStreamFilterContext *next = bsfc->next;
514  bsfc = next;
515  }
516  ost->bitstream_filters = NULL;
518  av_frame_free(&ost->last_frame);
519 
520  av_parser_close(ost->parser);
521 
522  av_freep(&ost->forced_keyframes);
524  av_freep(&ost->avfilter);
525  av_freep(&ost->logfile_prefix);
526 
528  ost->audio_channels_mapped = 0;
529 
530  av_dict_free(&ost->sws_dict);
531 
533 
534  av_freep(&output_streams[i]);
535  }
536 #if HAVE_PTHREADS
537  free_input_threads();
538 #endif
539  for (i = 0; i < nb_input_files; i++) {
540  avformat_close_input(&input_files[i]->ctx);
541  av_freep(&input_files[i]);
542  }
543  for (i = 0; i < nb_input_streams; i++) {
544  InputStream *ist = input_streams[i];
545 
548  av_dict_free(&ist->decoder_opts);
551  av_freep(&ist->filters);
552  av_freep(&ist->hwaccel_device);
553 
555 
556  av_freep(&input_streams[i]);
557  }
558 
559  if (vstats_file) {
560  if (fclose(vstats_file))
562  "Error closing vstats file, loss of information possible: %s\n",
563  av_err2str(AVERROR(errno)));
564  }
566 
567  av_freep(&input_streams);
568  av_freep(&input_files);
569  av_freep(&output_streams);
570  av_freep(&output_files);
571 
572  uninit_opts();
573 
575 
/* Report why we are exiting: interrupted by signal, or failed conversion. */
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && transcode_init_done) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
583  ffmpeg_exited = 1;
584 }
585 
/* NOTE(review): the signature (listing line 586) and the loop body (listing
   line 591) were lost in extraction. From the body, this iterates every
   entry of dictionary b; upstream this helper removes the matching entries
   from another dictionary — confirm upstream. */
587 {
588  AVDictionaryEntry *t = NULL;
589 
590  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592  }
593 }
594 
/* NOTE(review): the signature and a declaration line (listing lines 595 and
   597) were lost in extraction. From the body, this aborts the program if
   any user-supplied option left in dictionary m was never consumed. */
596 {
598  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600  exit_program(1);
601  }
602 }
603 
/* Abort after a codec was rejected as experimental; the codec and the
   encoder/decoder flag identify the offender but are unused here. */
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606  exit_program(1);
607 }
608 
609 static void update_benchmark(const char *fmt, ...)
610 {
611  if (do_benchmark_all) {
612  int64_t t = getutime();
613  va_list va;
614  char buf[1024];
615 
616  if (fmt) {
617  va_start(va, fmt);
618  vsnprintf(buf, sizeof(buf), fmt, va);
619  va_end(va);
620  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621  }
622  current_time = t;
623  }
624 }
625 
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628  int i;
629  for (i = 0; i < nb_output_streams; i++) {
630  OutputStream *ost2 = output_streams[i];
631  ost2->finished |= ost == ost2 ? this_stream : others;
632  }
633 }
634 
/* Send one encoded packet to the muxer: copy extradata to the muxing
   context, enforce -frames limits, collect video quality/error stats,
   run bitstream filters, sanitize DTS/PTS monotonicity, then interleave
   and write. The packet is consumed (unreferenced) in all paths.
   NOTE(review): listing lines 635 (signature), 637, 649-650, 669, 690,
   700, 755 and 766 were lost in extraction — confirm upstream. */
636 {
638  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
639  int ret;
640 
/* Mirror encoder extradata into the (deprecated) muxing codec context. */
641  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643  if (ost->st->codec->extradata) {
644  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
646  }
647  }
648 
651  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652 
653  /*
654  * Audio encoders may split the packets -- #frames in != #packets out.
655  * But there is no reordering, so we can limit the number of output packets
656  * by simply dropping them here.
657  * Counting encoded video frames needs to be done separately because of
658  * reordering, see do_video_out()
659  */
660  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661  if (ost->frame_number >= ost->max_frames) {
662  av_packet_unref(pkt);
663  return;
664  }
665  ost->frame_number++;
666  }
667  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668  int i;
670  NULL);
/* Extract quality/picture-type/error stats from the packet side data. */
671  ost->quality = sd ? AV_RL32(sd) : -1;
672  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
673 
674  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
675  if (sd && i < sd[5])
676  ost->error[i] = AV_RL64(sd + 8 + 8*i);
677  else
678  ost->error[i] = -1;
679  }
680 
/* For CFR output, packet duration is dictated by the frame rate. */
681  if (ost->frame_rate.num && ost->is_cfr) {
682  if (pkt->duration > 0)
683  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
685  ost->st->time_base);
686  }
687  }
688 
689  if (bsfc)
691 
692  if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693  print_error("", ret);
694  if (exit_on_error)
695  exit_program(1);
696  }
697  if (pkt->size == 0 && pkt->side_data_elems == 0)
698  return;
/* Bitstream filters/parsers may have produced extradata; publish it. */
699  if (!ost->st->codecpar->extradata && avctx->extradata) {
701  if (!ost->st->codecpar->extradata) {
702  av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
703  exit_program(1);
704  }
705  ost->st->codecpar->extradata_size = avctx->extradata_size;
706  memcpy(ost->st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
707  }
708 
/* Timestamp sanitation: fix DTS > PTS and enforce monotonically
   increasing DTS for formats that require timestamps. */
709  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
710  if (pkt->dts != AV_NOPTS_VALUE &&
711  pkt->pts != AV_NOPTS_VALUE &&
712  pkt->dts > pkt->pts) {
713  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
714  pkt->dts, pkt->pts,
715  ost->file_index, ost->st->index);
/* Replace both by the median of {pts, dts, last_mux_dts+1}. */
716  pkt->pts =
717  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
718  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
719  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
720  }
721  if(
722  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
723  pkt->dts != AV_NOPTS_VALUE &&
724  !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
725  ost->last_mux_dts != AV_NOPTS_VALUE) {
726  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
727  if (pkt->dts < max) {
728  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
729  av_log(s, loglevel, "Non-monotonous DTS in output stream "
730  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
731  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
732  if (exit_on_error) {
733  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
734  exit_program(1);
735  }
736  av_log(s, loglevel, "changing to %"PRId64". This may result "
737  "in incorrect timestamps in the output file.\n",
738  max);
739  if(pkt->pts >= pkt->dts)
740  pkt->pts = FFMAX(pkt->pts, max);
741  pkt->dts = max;
742  }
743  }
744  }
745  ost->last_mux_dts = pkt->dts;
746 
747  ost->data_size += pkt->size;
748  ost->packets_written++;
749 
750  pkt->stream_index = ost->index;
751 
752  if (debug_ts) {
753  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
754  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
756  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
757  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
758  pkt->size
759  );
760  }
761 
762  ret = av_interleaved_write_frame(s, pkt);
763  if (ret < 0) {
764  print_error("av_interleaved_write_frame()", ret);
765  main_return_code = 1;
767  }
768  av_packet_unref(pkt);
769 }
770 
/* NOTE(review): the signature (listing line 771) was lost in extraction.
   From the body: mark an output stream's encoder as finished and, when
   -shortest is active, clamp the file's recording time to this stream's
   current position. */
772 {
773  OutputFile *of = output_files[ost->file_index];
774 
775  ost->finished |= ENCODER_FINISHED;
776  if (of->shortest) {
777  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
778  of->recording_time = FFMIN(of->recording_time, end);
779  }
780 }
781 
/* NOTE(review): the signature (listing line 782) and part of the timestamp
   comparison (listing line 787) were lost in extraction. From the body:
   returns 1 while within the -t recording time; once exceeded, closes the
   output stream and returns 0. */
783 {
784  OutputFile *of = output_files[ost->file_index];
785 
786  if (of->recording_time != INT64_MAX &&
788  AV_TIME_BASE_Q) >= 0) {
789  close_output_stream(ost);
790  return 0;
791  }
792  return 1;
793 }
794 
/* Encode one audio frame and hand the resulting packet to write_frame().
   NOTE(review): the first signature line (listing line 795) and listing
   line 816 were lost in extraction — confirm upstream. */
796  AVFrame *frame)
797 {
798  AVCodecContext *enc = ost->enc_ctx;
799  AVPacket pkt;
800  int got_packet = 0;
801 
802  av_init_packet(&pkt);
803  pkt.data = NULL;
804  pkt.size = 0;
805 
806  if (!check_recording_time(ost))
807  return;
808 
/* Without a usable pts (or with forced sync), stamp with our running count. */
809  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
810  frame->pts = ost->sync_opts;
811  ost->sync_opts = frame->pts + frame->nb_samples;
812  ost->samples_encoded += frame->nb_samples;
813  ost->frames_encoded++;
814 
815  av_assert0(pkt.size || !pkt.data);
817  if (debug_ts) {
818  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
819  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
820  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
821  enc->time_base.num, enc->time_base.den);
822  }
823 
824  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
825  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
826  exit_program(1);
827  }
828  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
829 
830  if (got_packet) {
/* Convert packet timestamps from encoder to stream time base. */
831  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
832 
833  if (debug_ts) {
834  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
835  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
836  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
837  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
838  }
839 
840  write_frame(s, &pkt, ost);
841  }
842 }
843 
/* Encode one subtitle and mux the result; DVB subtitles are encoded twice
   (one packet to draw, one to clear).
   NOTE(review): the first signature line (listing line 844) and listing
   lines 875 and 894 were lost in extraction — confirm upstream. */
845  OutputStream *ost,
846  InputStream *ist,
847  AVSubtitle *sub)
848 {
849  int subtitle_out_max_size = 1024 * 1024;
850  int subtitle_out_size, nb, i;
851  AVCodecContext *enc;
852  AVPacket pkt;
853  int64_t pts;
854 
855  if (sub->pts == AV_NOPTS_VALUE) {
856  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
857  if (exit_on_error)
858  exit_program(1);
859  return;
860  }
861 
862  enc = ost->enc_ctx;
863 
/* Lazily allocate the shared encode buffer. */
864  if (!subtitle_out) {
865  subtitle_out = av_malloc(subtitle_out_max_size);
866  if (!subtitle_out) {
867  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
868  exit_program(1);
869  }
870  }
871 
872  /* Note: DVB subtitle need one packet to draw them and one other
873  packet to clear them */
874  /* XXX: signal it in the codec context ? */
876  nb = 2;
877  else
878  nb = 1;
879 
880  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
881  pts = sub->pts;
882  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
883  pts -= output_files[ost->file_index]->start_time;
884  for (i = 0; i < nb; i++) {
885  unsigned save_num_rects = sub->num_rects;
886 
887  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
888  if (!check_recording_time(ost))
889  return;
890 
891  sub->pts = pts;
892  // start_display_time is required to be 0
893  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
895  sub->start_display_time = 0;
/* The second (clear) packet carries no rectangles. */
896  if (i == 1)
897  sub->num_rects = 0;
898 
899  ost->frames_encoded++;
900 
901  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
902  subtitle_out_max_size, sub);
903  if (i == 1)
904  sub->num_rects = save_num_rects;
905  if (subtitle_out_size < 0) {
906  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
907  exit_program(1);
908  }
909 
910  av_init_packet(&pkt);
911  pkt.data = subtitle_out;
912  pkt.size = subtitle_out_size;
913  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
914  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
915  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
916  /* XXX: the pts correction is handled here. Maybe handling
917  it in the codec would be better */
918  if (i == 0)
919  pkt.pts += 90 * sub->start_display_time;
920  else
921  pkt.pts += 90 * sub->end_display_time;
922  }
923  pkt.dts = pkt.pts;
924  write_frame(s, &pkt, ost);
925  }
926 }
927 
929  OutputStream *ost,
930  AVFrame *next_picture,
931  double sync_ipts)
932 {
933  int ret, format_video_sync;
934  AVPacket pkt;
935  AVCodecContext *enc = ost->enc_ctx;
936  AVCodecContext *mux_enc = ost->st->codec;
937  int nb_frames, nb0_frames, i;
938  double delta, delta0;
939  double duration = 0;
940  int frame_size = 0;
941  InputStream *ist = NULL;
943 
944  if (ost->source_index >= 0)
945  ist = input_streams[ost->source_index];
946 
947  if (filter->inputs[0]->frame_rate.num > 0 &&
948  filter->inputs[0]->frame_rate.den > 0)
949  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
950 
951  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
952  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
953 
954  if (!ost->filters_script &&
955  !ost->filters &&
956  next_picture &&
957  ist &&
958  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
959  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
960  }
961 
962  if (!next_picture) {
963  //end, flushing
964  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
965  ost->last_nb0_frames[1],
966  ost->last_nb0_frames[2]);
967  } else {
968  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
969  delta = delta0 + duration;
970 
971  /* by default, we output a single frame */
972  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
973  nb_frames = 1;
974 
975  format_video_sync = video_sync_method;
976  if (format_video_sync == VSYNC_AUTO) {
977  if(!strcmp(s->oformat->name, "avi")) {
978  format_video_sync = VSYNC_VFR;
979  } else
981  if ( ist
982  && format_video_sync == VSYNC_CFR
983  && input_files[ist->file_index]->ctx->nb_streams == 1
984  && input_files[ist->file_index]->input_ts_offset == 0) {
985  format_video_sync = VSYNC_VSCFR;
986  }
987  if (format_video_sync == VSYNC_CFR && copy_ts) {
988  format_video_sync = VSYNC_VSCFR;
989  }
990  }
991  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
992 
993  if (delta0 < 0 &&
994  delta > 0 &&
995  format_video_sync != VSYNC_PASSTHROUGH &&
996  format_video_sync != VSYNC_DROP) {
997  if (delta0 < -0.6) {
998  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
999  } else
1000  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1001  sync_ipts = ost->sync_opts;
1002  duration += delta0;
1003  delta0 = 0;
1004  }
1005 
1006  switch (format_video_sync) {
1007  case VSYNC_VSCFR:
1008  if (ost->frame_number == 0 && delta0 >= 0.5) {
1009  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1010  delta = duration;
1011  delta0 = 0;
1012  ost->sync_opts = lrint(sync_ipts);
1013  }
1014  case VSYNC_CFR:
1015  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1016  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1017  nb_frames = 0;
1018  } else if (delta < -1.1)
1019  nb_frames = 0;
1020  else if (delta > 1.1) {
1021  nb_frames = lrintf(delta);
1022  if (delta0 > 1.1)
1023  nb0_frames = lrintf(delta0 - 0.6);
1024  }
1025  break;
1026  case VSYNC_VFR:
1027  if (delta <= -0.6)
1028  nb_frames = 0;
1029  else if (delta > 0.6)
1030  ost->sync_opts = lrint(sync_ipts);
1031  break;
1032  case VSYNC_DROP:
1033  case VSYNC_PASSTHROUGH:
1034  ost->sync_opts = lrint(sync_ipts);
1035  break;
1036  default:
1037  av_assert0(0);
1038  }
1039  }
1040 
1041  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1042  nb0_frames = FFMIN(nb0_frames, nb_frames);
1043 
1044  memmove(ost->last_nb0_frames + 1,
1045  ost->last_nb0_frames,
1046  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1047  ost->last_nb0_frames[0] = nb0_frames;
1048 
1049  if (nb0_frames == 0 && ost->last_dropped) {
1050  nb_frames_drop++;
1052  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1053  ost->frame_number, ost->st->index, ost->last_frame->pts);
1054  }
1055  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1056  if (nb_frames > dts_error_threshold * 30) {
1057  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1058  nb_frames_drop++;
1059  return;
1060  }
1061  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1062  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1063  }
1064  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1065 
1066  /* duplicates frame if needed */
1067  for (i = 0; i < nb_frames; i++) {
1068  AVFrame *in_picture;
1069  av_init_packet(&pkt);
1070  pkt.data = NULL;
1071  pkt.size = 0;
1072 
1073  if (i < nb0_frames && ost->last_frame) {
1074  in_picture = ost->last_frame;
1075  } else
1076  in_picture = next_picture;
1077 
1078  if (!in_picture)
1079  return;
1080 
1081  in_picture->pts = ost->sync_opts;
1082 
1083 #if 1
1084  if (!check_recording_time(ost))
1085 #else
1086  if (ost->frame_number >= ost->max_frames)
1087 #endif
1088  return;
1089 
1090 #if FF_API_LAVF_FMT_RAWPICTURE
1091  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1092  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1093  /* raw pictures are written as AVPicture structure to
1094  avoid any copies. We support temporarily the older
1095  method. */
1096  if (in_picture->interlaced_frame)
1097  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1098  else
1099  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1100  pkt.data = (uint8_t *)in_picture;
1101  pkt.size = sizeof(AVPicture);
1102  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1103  pkt.flags |= AV_PKT_FLAG_KEY;
1104 
1105  write_frame(s, &pkt, ost);
1106  } else
1107 #endif
1108  {
1109  int got_packet, forced_keyframe = 0;
1110  double pts_time;
1111 
1113  ost->top_field_first >= 0)
1114  in_picture->top_field_first = !!ost->top_field_first;
1115 
1116  if (in_picture->interlaced_frame) {
1117  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1118  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1119  else
1120  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1121  } else
1122  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1123 
1124  in_picture->quality = enc->global_quality;
1125  in_picture->pict_type = 0;
1126 
1127  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1128  in_picture->pts * av_q2d(enc->time_base) : NAN;
1129  if (ost->forced_kf_index < ost->forced_kf_count &&
1130  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1131  ost->forced_kf_index++;
1132  forced_keyframe = 1;
1133  } else if (ost->forced_keyframes_pexpr) {
1134  double res;
1135  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1138  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1144  res);
1145  if (res) {
1146  forced_keyframe = 1;
1152  }
1153 
1155  } else if ( ost->forced_keyframes
1156  && !strncmp(ost->forced_keyframes, "source", 6)
1157  && in_picture->key_frame==1) {
1158  forced_keyframe = 1;
1159  }
1160 
1161  if (forced_keyframe) {
1162  in_picture->pict_type = AV_PICTURE_TYPE_I;
1163  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1164  }
1165 
1167  if (debug_ts) {
1168  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1169  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1170  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1171  enc->time_base.num, enc->time_base.den);
1172  }
1173 
1174  ost->frames_encoded++;
1175 
1176  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1177  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1178  if (ret < 0) {
1179  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1180  exit_program(1);
1181  }
1182 
1183  if (got_packet) {
1184  if (debug_ts) {
1185  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1188  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1189  }
1190 
1191  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1192  pkt.pts = ost->sync_opts;
1193 
1194  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1195 
1196  if (debug_ts) {
1197  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1198  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1199  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1200  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1201  }
1202 
1203  frame_size = pkt.size;
1204  write_frame(s, &pkt, ost);
1205 
1206  /* if two pass, output log */
1207  if (ost->logfile && enc->stats_out) {
1208  fprintf(ost->logfile, "%s", enc->stats_out);
1209  }
1210  }
1211  }
1212  ost->sync_opts++;
1213  /*
1214  * For video, number of frames in == number of packets out.
1215  * But there may be reordering, so we can't throw away frames on encoder
1216  * flush, we need to limit them here, before they go into encoder.
1217  */
1218  ost->frame_number++;
1219 
1220  if (vstats_filename && frame_size)
1221  do_video_stats(ost, frame_size);
1222  }
1223 
1224  if (!ost->last_frame)
1225  ost->last_frame = av_frame_alloc();
1226  av_frame_unref(ost->last_frame);
1227  if (next_picture && ost->last_frame)
1228  av_frame_ref(ost->last_frame, next_picture);
1229  else
1230  av_frame_free(&ost->last_frame);
1231 }
1232 
1233 static double psnr(double d)
1234 {
1235  return -10.0 * log10(d);
1236 }
1237 
/*
 * Append one line of per-frame video statistics to the -vstats log file.
 * The log file is opened lazily on the first call and kept open globally.
 *
 * NOTE(review): the signature line (original line 1238, presumably
 * "static void do_video_stats(OutputStream *ost, int frame_size)") was
 * lost in extraction; only the body survives below.
 */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid a near-zero divisor for avg_bitrate */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1275 
/*
 * Mark an output stream as finished; with -shortest, finishing one stream
 * finishes encoding and muxing for every stream of its output file.
 *
 * NOTE(review): the signature line (original line 1276, presumably
 * "static void close_output_stream(OutputStream *ost)") and one statement
 * (original line 1281, presumably marking ost itself finished) were lost
 * in extraction.
 */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    if (of->shortest) {
        /* -shortest: stop both the encoder and the muxer on every sibling stream */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1288 
1289 /**
1290  * Get and encode new output from any of the filtergraphs, without causing
1291  * activity.
1292  *
1293  * @return 0 for success, <0 for severe errors
1294  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): a declaration (original line 1304, presumably
         * "AVFilterContext *filter;") was lost in extraction. */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* streams not fed by a filtergraph (e.g. stream copy) are skipped */
        if (!ost->filter)
            continue;
        filter = ost->filter->filter;

        /* lazily allocate the reusable frame that receives filter output */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            /* NOTE(review): the flags argument of the call above (original
             * line 1320) was lost in extraction. */
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): start of an av_log() call (original line
                     * 1323) was lost in extraction. */
                        "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* at EOF with flush requested, feed a NULL frame so the
                     * video encoder can drain its delayed pictures */
                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* stream already closed: discard any further filter output */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational tb = enc->time_base;
                /* widen the timebase denominator so float_pts keeps extra
                 * fractional precision compared to the integer pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of->ctx, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    /* NOTE(review): start of an av_log() call (original line
                     * 1372) was lost in extraction. */
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of->ctx, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1389 
/*
 * Print end-of-run statistics: per-type byte totals with muxing overhead,
 * then (at verbose level) per-stream packet/frame counts for every input
 * and output file.
 *
 * @param total_size total number of bytes written to the first output file
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the condition guarding this assignment (original
         * lines 1409-1410, presumably a two-pass flags test) was lost in
         * extraction; only its body survives below. */
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw stream payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1506 
/*
 * Print the periodic progress line (frame/fps/q/size/time/bitrate/speed)
 * to stderr and, when -progress is used, write machine-readable key=value
 * pairs to progress_avio. Rate-limited to one update per 500ms unless this
 * is the final report.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        /* throttle: at most one report every 500ms */
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams only contribute their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are quarter size in 4:2:0 */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): a guard condition (original line 1616) before this
         * statement was lost in extraction. */
        pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                      ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                  "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    /* NOTE(review): the guard (original line 1657) and the argument line
     * (original line 1659) of the snprintf below were lost in extraction. */
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);

        fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): start of an av_log() call (original line
                 * 1690) was lost in extraction. */
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1698 
/*
 * Drain every active encoder at end of stream by feeding NULL frames until
 * the encoder reports no more delayed output, writing each drained packet
 * to its muxer.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        /* stream copy: nothing buffered inside an encoder */
        if (!ost->encoding_needed)
            continue;

        /* raw-sample audio encoders (frame_size <= 1) never buffer frames */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc = "video";
                break;
            default:
                /* subtitles and data streams have nothing to flush */
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int pkt_size;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                /* NOTE(review): one statement (original line 1744) before
                 * the encode call was lost in extraction. */
                ret = encode(enc, &pkt, NULL, &got_packet); /* NULL frame = drain request */
                update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
                /* two-pass: append the encoder's pass-1 stats */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    /* encoder fully drained */
                    stop_encoding = 1;
                    break;
                }
                if (ost->finished & MUXER_FINISHED) {
                    av_packet_unref(&pkt);
                    continue;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                /* NOTE(review): the condition guarding this call (original
                 * line 1767) was lost in extraction. */
                do_video_stats(ost, pkt_size);
            }

            if (stop_encoding)
                break;
        }
    }
}
1777 
1778 /*
1779  * Check whether a packet from ist should be written into ost at this time
1780  */
/*
 * Decide whether a packet originating from ist should be written to ost
 * at this time. Returns 1 to copy the packet, 0 to drop it.
 *
 * NOTE(review): the signature line (original line 1781, presumably
 * "static int check_output_constraints(InputStream *ist, OutputStream *ost)")
 * was lost in extraction; only the body survives below.
 */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* this output stream is fed by a different input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* packet lies before the requested output start (-ss) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1797 
/*
 * Copy one input packet to an output stream without re-encoding (-codec
 * copy): applies start/recording-time trimming, rescales timestamps into
 * the output stream timebase, optionally runs the parser for level/format
 * changes, then hands the packet to the muxer.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* NOTE(review): the second operand of the && below (original line 1810)
     * was lost in extraction. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    /* drop packets that lie before the effective start point */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output-file recording time (-t) reached: close the stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* input-file recording time reached: close the stream */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* fall back to the input stream's tracked dts when the packet has none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration of 'duration' (original line 1855)
         * was lost in extraction. */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               /* NOTE(review): argument line
                                                * (original 1859) lost in
                                                * extraction. */
                                               ost->st->time_base) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_VC1
       ) {
        int ret = av_parser_change(ost->parser, ost->st->codec,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   /* NOTE(review): final argument (original
                                    * line 1874) lost in extraction. */
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* parser allocated a new buffer: give the packet ownership */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    write_frame(of->ctx, &opkt, ost);
}
1910 
/*
 * Ensure the decoder context has a channel layout, guessing a default one
 * from the channel count when the decoder did not report any. Returns 1 on
 * success (layout present or guessed), 0 if no layout could be determined.
 *
 * NOTE(review): the signature line (original line 1911, presumably
 * "static int guess_input_channel_layout(InputStream *ist)") and the line
 * performing the actual guess (original line 1920) were lost in extraction.
 */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* too many channels for a meaningful default layout guess */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
1930 
/*
 * Track decode success/failure statistics and enforce -xerror
 * (exit_on_error): abort on a decode error or on a corrupt decoded frame.
 *
 * NOTE(review): a nested condition (original line 1940) inside the last if
 * was lost in extraction, which is why an apparently unmatched closing
 * brace remains visible below.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] = successes, [1] = failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
            exit_program(1);
        }
    }
}
1946 
/*
 * Decode one audio packet, reconfigure the filtergraphs if the sample
 * format/rate/channel layout changed mid-stream, pick the best available
 * timestamp for the decoded frame, and push it into every attached filter.
 *
 * @return the decoder's return value, or a negative error from frame
 *         refcounting / buffersrc submission
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    /* NOTE(review): one statement (original line 1960) before the decode
     * call was lost in extraction. */
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* detect mid-stream audio parameter changes that require filter reinit */
    resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
                       ist->resample_channels != avctx->channels ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
        /* NOTE(review): argument line (original 2002) lost in extraction. */
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        /* NOTE(review): start of an av_log() call (original line 2006) and
         * one argument line (original 2009) were lost in extraction. */
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        ist->resample_sample_fmt = decoded_frame->format;
        ist->resample_sample_rate = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels = avctx->channels;

        /* rebuild every filtergraph that consumes this input stream */
        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
            }
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    pkt->pts = AV_NOPTS_VALUE;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    /* push into all filters; ref for all but the last to avoid a copy there */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
        /* NOTE(review): flags argument (original line 2059) lost in
         * extraction. */
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0)
            break;
    }
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2071 
/*
 * Decode one video packet for input stream 'ist' and feed any resulting
 * frame into every filtergraph input attached to the stream.
 *
 * On return, *got_output is non-zero if a frame was produced. Returns the
 * decoder's error code (or a filter-injection error) when negative.
 *
 * NOTE(review): this doxygen-extracted listing has gaps — original lines
 * 2086, 2097, 2196, 2200 and 2207 were dropped — so a few statements below
 * appear truncated (e.g. the av_buffersrc_add_frame_flags() call and an
 * av_log() opener are missing).
 */
2072 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2073 {
2074  AVFrame *decoded_frame, *f;
2075  int i, ret = 0, err = 0, resample_changed;
2076  int64_t best_effort_timestamp;
2077  AVRational *frame_sample_aspect;
2078 
/* Lazily allocate the reusable decode/filter frames owned by the InputStream. */
2079  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2080  return AVERROR(ENOMEM);
2081  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2082  return AVERROR(ENOMEM);
2083  decoded_frame = ist->decoded_frame;
/* Overwrite the packet dts with the stream's running dts estimate,
 * rescaled from AV_TIME_BASE_Q into the stream time base. */
2084  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2085 
2087  ret = avcodec_decode_video2(ist->dec_ctx,
2088  decoded_frame, got_output, pkt);
2089  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2090 
2091  // The following line may be required in some cases where there is no parser
2092  // or the parser does not has_b_frames correctly
2093  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2094  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2095  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2096  } else
/* NOTE(review): the av_log(...) opener for this warning is missing from the
 * extracted listing (original line 2097). */
2098  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2099  "If you want to help, upload a sample "
2100  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2101  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2102  ist->dec_ctx->has_b_frames,
2103  ist->st->codec->has_b_frames);
2104  }
2105 
2106  check_decode_result(ist, got_output, ret);
2107 
/* Debug aid: report when the decoded frame's geometry/pixel format disagrees
 * with what the codec context advertises. */
2108  if (*got_output && ret >= 0) {
2109  if (ist->dec_ctx->width != decoded_frame->width ||
2110  ist->dec_ctx->height != decoded_frame->height ||
2111  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2112  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2113  decoded_frame->width,
2114  decoded_frame->height,
2115  decoded_frame->format,
2116  ist->dec_ctx->width,
2117  ist->dec_ctx->height,
2118  ist->dec_ctx->pix_fmt);
2119  }
2120  }
2121 
2122  if (!*got_output || ret < 0)
2123  return ret;
2124 
/* Honor a user-forced field order (-top per-stream option), if set. */
2125  if(ist->top_field_first>=0)
2126  decoded_frame->top_field_first = ist->top_field_first;
2127 
2128  ist->frames_decoded++;
2129 
/* Download the frame from hardware-accelerated decoding into system memory
 * when a retrieval callback is installed for the active hwaccel format. */
2130  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2131  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2132  if (err < 0)
2133  goto fail;
2134  }
2135  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2136 
/* Use the best-effort timestamp (pts/dts heuristics) to advance the
 * stream's pts tracking in AV_TIME_BASE_Q units. */
2137  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2138  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2139  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2140 
2141  if (ts != AV_NOPTS_VALUE)
2142  ist->next_pts = ist->pts = ts;
2143  }
2144 
2145  if (debug_ts) {
2146  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2147  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2148  ist->st->index, av_ts2str(decoded_frame->pts),
2149  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2150  best_effort_timestamp,
2151  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2152  decoded_frame->key_frame, decoded_frame->pict_type,
2153  ist->st->time_base.num, ist->st->time_base.den);
2154  }
2155 
2156  pkt->size = 0;
2157 
/* Container-level SAR overrides the codec-level one when present. */
2158  if (ist->st->sample_aspect_ratio.num)
2159  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2160 
/* If the frame geometry or pixel format changed mid-stream, reconfigure
 * every filtergraph this stream feeds (unless reinit is disabled). */
2161  resample_changed = ist->resample_width != decoded_frame->width ||
2162  ist->resample_height != decoded_frame->height ||
2163  ist->resample_pix_fmt != decoded_frame->format;
2164  if (resample_changed) {
/* NOTE(review): the av_log(...) opener (original line 2165) and one
 * argument line (2168) are missing from this extracted listing. */
2166  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2167  ist->file_index, ist->st->index,
2169  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2170 
2171  ist->resample_width = decoded_frame->width;
2172  ist->resample_height = decoded_frame->height;
2173  ist->resample_pix_fmt = decoded_frame->format;
2174 
2175  for (i = 0; i < nb_filtergraphs; i++) {
2176  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2177  configure_filtergraph(filtergraphs[i]) < 0) {
2178  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2179  exit_program(1);
2180  }
2181  }
2182  }
2183 
2184  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Feed the frame to each attached filter input: every input except the last
 * gets a new reference (av_frame_ref); the last consumes decoded_frame
 * directly, avoiding one extra ref. */
2185  for (i = 0; i < ist->nb_filters; i++) {
2186  if (!frame_sample_aspect->num)
2187  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2188 
2189  if (i < ist->nb_filters - 1) {
2190  f = ist->filter_frame;
2191  err = av_frame_ref(f, decoded_frame);
2192  if (err < 0)
2193  break;
2194  } else
2195  f = decoded_frame;
/* NOTE(review): the av_buffersrc_add_frame_flags(...) call that sets 'ret'
 * (original line 2196) is missing from this extracted listing. */
2197  if (ret == AVERROR_EOF) {
2198  ret = 0; /* ignore */
2199  } else if (ret < 0) {
/* NOTE(review): the av_log(NULL, AV_LOG_FATAL, ...) opener (original line
 * 2200) is missing from this extracted listing. */
2201  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2202  exit_program(1);
2203  }
2204  }
2205 
/* Cleanup: drop our references; the frames themselves are reused next call.
 * NOTE(review): the av_frame_unref(ist->filter_frame) line (original 2207)
 * is missing from this extracted listing. */
2206 fail:
2208  av_frame_unref(decoded_frame);
2209  return err < 0 ? err : ret;
2210 }
2211 
/*
 * Decode one subtitle packet and send the result to sub2video and to every
 * subtitle output stream fed by this input.
 *
 * NOTE(review): original lines 2234 and 2236 (inside the duration-reduction
 * branch) are missing from this extracted listing.
 */
2212 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2213 {
2214  AVSubtitle subtitle;
2215  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2216  &subtitle, got_output, pkt);
2217 
2218  check_decode_result(NULL, got_output, ret);
2219 
/* On decode failure or no output: an empty packet means EOF, so flush the
 * sub2video machinery. */
2220  if (ret < 0 || !*got_output) {
2221  if (!pkt->size)
2222  sub2video_flush(ist);
2223  return ret;
2224  }
2225 
/* -fix_sub_duration: the FFSWAPs below hold each decoded subtitle back by
 * one iteration (in ist->prev_sub) so its display duration can be clipped
 * against the pts of the following subtitle; 'end' is that gap in ms. */
2226  if (ist->fix_sub_duration) {
2227  int end = 1;
2228  if (ist->prev_sub.got_output) {
2229  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2230  1000, AV_TIME_BASE);
2231  if (end < ist->prev_sub.subtitle.end_display_time) {
2232  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2233  "Subtitle duration reduced from %d to %d%s\n",
/* NOTE(review): the argument line and the end_display_time assignment
 * (original lines 2234, 2236) are missing from this extracted listing. */
2235  end <= 0 ? ", dropping it" : "");
2237  }
2238  }
2239  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2240  FFSWAP(int, ret, ist->prev_sub.ret);
2241  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* A non-positive gap means the (now swapped-in) subtitle is dropped. */
2242  if (end <= 0)
2243  goto out;
2244  }
2245 
2246  if (!*got_output)
2247  return ret;
2248 
2249  sub2video_update(ist, &subtitle);
2250 
2251  if (!subtitle.num_rects)
2252  goto out;
2253 
2254  ist->frames_decoded++;
2255 
/* Re-encode the subtitle on every matching subtitle output stream. */
2256  for (i = 0; i < nb_output_streams; i++) {
2257  OutputStream *ost = output_streams[i];
2258 
2259  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2260  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2261  continue;
2262 
2263  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2264  }
2265 
2266 out:
2267  avsubtitle_free(&subtitle);
2268  return ret;
2269 }
2270 
/*
 * send_filter_eof(): push a NULL frame (EOF marker) into every buffersrc
 * filter fed by this input stream; returns the first error or 0.
 * NOTE(review): the signature line (original 2271) is missing from this
 * extracted listing — the name is taken from the call site below,
 * send_filter_eof(ist), so it presumably takes an InputStream *ist.
 */
2272 {
2273  int i, ret;
2274  for (i = 0; i < ist->nb_filters; i++) {
2275  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2276  if (ret < 0)
2277  return ret;
2278  }
2279  return 0;
2280 }
2281 
/*
 * Central per-packet dispatcher: decodes (or stream-copies) one input
 * packet, maintaining the stream's dts/pts estimates in AV_TIME_BASE_Q.
 * Returns got_output (non-zero if the decoder produced something).
 *
 * NOTE(review): this extracted listing has gaps — original lines 2329-2330,
 * 2344, 2347, 2379 and 2421 are missing — so a few statements below appear
 * truncated.
 */
2282 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2283 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2284 {
2285  int ret = 0, i;
2286  int got_output = 0;
2287 
2288  AVPacket avpkt;
/* First packet for this stream: seed dts/pts. The negative offset accounts
 * for decoder delay (has_b_frames) when an average frame rate is known. */
2289  if (!ist->saw_first_ts) {
2290  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291  ist->pts = 0;
2292  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2293  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2294  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2295  }
2296  ist->saw_first_ts = 1;
2297  }
2298 
2299  if (ist->next_dts == AV_NOPTS_VALUE)
2300  ist->next_dts = ist->dts;
2301  if (ist->next_pts == AV_NOPTS_VALUE)
2302  ist->next_pts = ist->pts;
2303 
/* EOF: build an empty packet so decoders with delay can be flushed. */
2304  if (!pkt) {
2305  /* EOF handling */
2306  av_init_packet(&avpkt);
2307  avpkt.data = NULL;
2308  avpkt.size = 0;
2309  goto handle_eof;
2310  } else {
2311  avpkt = *pkt;
2312  }
2313 
2314  if (pkt->dts != AV_NOPTS_VALUE) {
2315  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2316  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2317  ist->next_pts = ist->pts = ist->dts;
2318  }
2319 
2320  // while we have more to decode or while the decoder did output something on EOF
2321  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2322  int duration;
2323  handle_eof:
2324 
2325  ist->pts = ist->next_pts;
2326  ist->dts = ist->next_dts;
2327 
/* NOTE(review): the rest of this condition and the av_log opener (original
 * lines 2329-2330) are missing from this extracted listing. */
2328  if (avpkt.size && avpkt.size != pkt->size &&
2331  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2332  ist->showed_multi_packet_warning = 1;
2333  }
2334 
/* Dispatch to the per-media-type decode helper. */
2335  switch (ist->dec_ctx->codec_type) {
2336  case AVMEDIA_TYPE_AUDIO:
2337  ret = decode_audio (ist, &avpkt, &got_output);
2338  break;
2339  case AVMEDIA_TYPE_VIDEO:
2340  ret = decode_video (ist, &avpkt, &got_output);
/* Estimate this frame's duration: prefer the packet's own duration, then
 * fall back to the codec frame rate. */
2341  if (avpkt.duration) {
2342  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2343  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* NOTE(review): the 'ticks' declaration and the divisor line (original
 * lines 2344, 2347) are missing from this extracted listing. */
2345  duration = ((int64_t)AV_TIME_BASE *
2346  ist->dec_ctx->framerate.den * ticks) /
2348  } else
2349  duration = 0;
2350 
2351  if(ist->dts != AV_NOPTS_VALUE && duration) {
2352  ist->next_dts += duration;
2353  }else
2354  ist->next_dts = AV_NOPTS_VALUE;
2355 
2356  if (got_output)
2357  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2358  break;
2359  case AVMEDIA_TYPE_SUBTITLE:
2360  ret = transcode_subtitles(ist, &avpkt, &got_output);
2361  break;
2362  default:
2363  return -1;
2364  }
2365 
2366  if (ret < 0) {
2367  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2368  ist->file_index, ist->st->index, av_err2str(ret));
2369  if (exit_on_error)
2370  exit_program(1);
2371  break;
2372  }
2373 
2374  avpkt.dts=
2375  avpkt.pts= AV_NOPTS_VALUE;
2376 
2377  // touch data and size only if not EOF
/* NOTE(review): a condition line (original 2379) is missing from this
 * extracted listing; as shown, the whole remaining packet is consumed. */
2378  if (pkt) {
2380  ret = avpkt.size;
2381  avpkt.data += ret;
2382  avpkt.size -= ret;
2383  }
2384  if (!got_output) {
2385  continue;
2386  }
/* On EOF, stop after the first flushed frame; the caller loops again. */
2387  if (got_output && !pkt)
2388  break;
2389  }
2390 
2391  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2392  /* except when looping we need to flush but not to send an EOF */
2393  if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2394  int ret = send_filter_eof(ist);
2395  if (ret < 0) {
2396  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2397  exit_program(1);
2398  }
2399  }
2400 
/* Stream copy: no decoder runs, so advance next_dts from frame_size
 * (audio) or frame rate / packet duration (video). */
2401  /* handle stream copy */
2402  if (!ist->decoding_needed) {
2403  ist->dts = ist->next_dts;
2404  switch (ist->dec_ctx->codec_type) {
2405  case AVMEDIA_TYPE_AUDIO:
2406  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2407  ist->dec_ctx->sample_rate;
2408  break;
2409  case AVMEDIA_TYPE_VIDEO:
2410  if (ist->framerate.num) {
2411  // TODO: Remove work-around for c99-to-c89 issue 7
2412  AVRational time_base_q = AV_TIME_BASE_Q;
2413  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2414  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2415  } else if (pkt->duration) {
2416  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2417  } else if(ist->dec_ctx->framerate.num != 0) {
2418  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
/* NOTE(review): the divisor line (original 2421) is missing from this
 * extracted listing. */
2419  ist->next_dts += ((int64_t)AV_TIME_BASE *
2420  ist->dec_ctx->framerate.den * ticks) /
2422  }
2423  break;
2424  }
2425  ist->pts = ist->dts;
2426  ist->next_pts = ist->next_dts;
2427  }
/* Forward the packet to every stream-copy output it maps to. */
2428  for (i = 0; pkt && i < nb_output_streams; i++) {
2429  OutputStream *ost = output_streams[i];
2430 
2431  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2432  continue;
2433 
2434  do_streamcopy(ist, ost, pkt);
2435  }
2436 
2437  return got_output;
2438 }
2439 
/*
 * Generate an SDP description covering every RTP output and either print it
 * to stdout or write it to -sdp_file.
 * NOTE(review): one line (original 2471) after avio_closep() is missing from
 * this extracted listing.
 */
2440 static void print_sdp(void)
2441 {
2442  char sdp[16384];
2443  int i;
2444  int j;
2445  AVIOContext *sdp_pb;
2446  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2447 
2448  if (!avc)
2449  exit_program(1);
/* Collect only the muxer contexts that use the "rtp" output format. */
2450  for (i = 0, j = 0; i < nb_output_files; i++) {
2451  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2452  avc[j] = output_files[i]->ctx;
2453  j++;
2454  }
2455  }
2456 
/* Nothing to describe if no RTP outputs exist. */
2457  if (!j)
2458  goto fail;
2459 
2460  av_sdp_create(avc, j, sdp, sizeof(sdp));
2461 
2462  if (!sdp_filename) {
2463  printf("SDP:\n%s\n", sdp);
2464  fflush(stdout);
2465  } else {
2466  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2467  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2468  } else {
2469  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2470  avio_closep(&sdp_pb);
2472  }
2473  }
2474 
2475 fail:
2476  av_freep(&avc);
2477 }
2478 
/*
 * get_hwaccel(): linear search of the global hwaccels[] table for the entry
 * matching a pixel format; NULL if none matches.
 * NOTE(review): the signature line (original 2479) is missing from this
 * extracted listing — the call site get_hwaccel(*p) suggests it takes an
 * enum AVPixelFormat and returns a const HWAccel *.
 */
2480 {
2481  int i;
2482  for (i = 0; hwaccels[i].name; i++)
2483  if (hwaccels[i].pix_fmt == pix_fmt)
2484  return &hwaccels[i];
2485  return NULL;
2486 }
2487 
/*
 * get_format(): AVCodecContext.get_format callback (installed in
 * init_input_stream). Walks the decoder's candidate pixel-format list and
 * picks the first hwaccel format whose backend initializes, falling back to
 * the first non-hwaccel format.
 * NOTE(review): the signature line (original 2488) and the pixel-format
 * descriptor lookup initializing 'desc' (original 2495) are missing from
 * this extracted listing.
 */
2489 {
2490  InputStream *ist = s->opaque;
2491  const enum AVPixelFormat *p;
2492  int ret;
2493 
2494  for (p = pix_fmts; *p != -1; p++) {
2496  const HWAccel *hwaccel;
2497 
/* Candidate lists are ordered hwaccel-first; the first software format
 * terminates the search. */
2498  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2499  break;
2500 
/* Skip hwaccels that do not match the user's selection or the one already
 * active on this stream. */
2501  hwaccel = get_hwaccel(*p);
2502  if (!hwaccel ||
2503  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2504  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2505  continue;
2506 
2507  ret = hwaccel->init(s);
2508  if (ret < 0) {
/* An explicitly requested hwaccel that fails to init is a hard error;
 * with HWACCEL_AUTO we just try the next candidate.
 * NOTE(review): the av_log opener (original 2510) is missing here. */
2509  if (ist->hwaccel_id == hwaccel->id) {
2511  "%s hwaccel requested for input stream #%d:%d, "
2512  "but cannot be initialized.\n", hwaccel->name,
2513  ist->file_index, ist->st->index);
2514  return AV_PIX_FMT_NONE;
2515  }
2516  continue;
2517  }
2518  ist->active_hwaccel_id = hwaccel->id;
2519  ist->hwaccel_pix_fmt = *p;
2520  break;
2521  }
2522 
2523  return *p;
2524 }
2525 
/*
 * get_buffer(): AVCodecContext.get_buffer2 callback (installed in
 * init_input_stream). Delegates allocation to the active hwaccel when the
 * frame uses its pixel format, otherwise to the default allocator.
 * NOTE(review): the signature line (original 2526) is missing from this
 * extracted listing — the body implies (AVCodecContext *s, AVFrame *frame,
 * int flags).
 */
2527 {
2528  InputStream *ist = s->opaque;
2529 
2530  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2531  return ist->hwaccel_get_buffer(s, frame, flags);
2532 
2533  return avcodec_default_get_buffer2(s, frame, flags);
2534 }
2535 
/*
 * Open the decoder for one input stream (if decoding is needed), installing
 * the hwaccel-aware get_format/get_buffer callbacks, then reset its pts/dts
 * predictions. On failure a human-readable message is written to 'error'.
 * NOTE(review): original lines 2558 and 2576 are missing from this
 * extracted listing.
 */
2536 static int init_input_stream(int ist_index, char *error, int error_len)
2537 {
2538  int ret;
2539  InputStream *ist = input_streams[ist_index];
2540 
2541  if (ist->decoding_needed) {
2542  AVCodec *codec = ist->dec;
2543  if (!codec) {
2544  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2545  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2546  return AVERROR(EINVAL);
2547  }
2548 
/* Hook the stream into the decoder context so the get_format/get_buffer
 * callbacks (defined above) can reach per-stream hwaccel state. */
2549  ist->dec_ctx->opaque = ist;
2550  ist->dec_ctx->get_format = get_format;
2551  ist->dec_ctx->get_buffer2 = get_buffer;
2552  ist->dec_ctx->thread_safe_callbacks = 1;
2553 
2554  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for both filtering and encoding need display-end
 * times computed by the decoder (compute_edt). */
2555  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2556  (ist->decoding_needed & DECODING_FOR_OST)) {
2557  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
/* NOTE(review): an intervening condition line (original 2558) is missing
 * from this extracted listing. */
2559  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2560  }
2561 
2562  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2563 
/* Default to automatic thread count unless the user chose one. */
2564  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2565  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2566  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2567  if (ret == AVERROR_EXPERIMENTAL)
2568  abort_codec_experimental(codec, 0);
2569 
2570  snprintf(error, error_len,
2571  "Error while opening decoder for input stream "
2572  "#%d:%d : %s",
2573  ist->file_index, ist->st->index, av_err2str(ret));
2574  return ret;
2575  }
/* NOTE(review): a line after avcodec_open2 (original 2576, presumably a
 * leftover-options check) is missing from this extracted listing. */
2577  }
2578 
2579  ist->next_pts = AV_NOPTS_VALUE;
2580  ist->next_dts = AV_NOPTS_VALUE;
2581 
2582  return 0;
2583 }
2584 
/*
 * get_input_stream(): map an output stream back to its source InputStream,
 * or NULL when the output has no direct input (pure filter/attachment).
 * NOTE(review): the signature line (original 2585) is missing from this
 * extracted listing — call sites pass an OutputStream *ost and use the
 * result as an InputStream *.
 */
2586 {
2587  if (ost->source_index >= 0)
2588  return input_streams[ost->source_index];
2589  return NULL;
2590 }
2591 
/**
 * qsort() comparator ordering int64_t values ascending.
 *
 * Uses the sign-of-difference idiom ((a > b) - (a < b)) rather than plain
 * subtraction, which could overflow for distant int64_t values.
 *
 * @param a pointer to the first int64_t
 * @param b pointer to the second int64_t
 * @return negative, zero or positive as *a is less than, equal to or
 *         greater than *b
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2596 
/*
 * Open the encoder for one output stream (or apply options for stream
 * copy) and mirror the resulting parameters onto the muxer's AVStream.
 * On failure a human-readable message is written to 'error'.
 *
 * NOTE(review): this extracted listing has gaps — original lines 2610,
 * 2614, 2625, 2641, 2643, 2650, 2658 and 2683 are missing — so several
 * statements below appear truncated.
 */
2597 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2598 {
2599  int ret = 0;
2600 
2601  if (ost->encoding_needed) {
2602  AVCodec *codec = ost->enc;
2603  AVCodecContext *dec = NULL;
2604  InputStream *ist;
2605 
/* Propagate the decoder's ASS subtitle header to the encoder.
 * NOTE(review): the allocation line (original 2610) and a line after the
 * memcpy (original 2614) are missing from this extracted listing. */
2606  if ((ist = get_input_stream(ost)))
2607  dec = ist->dec_ctx;
2608  if (dec && dec->subtitle_header) {
2609  /* ASS code assumes this buffer is null terminated so add extra byte. */
2611  if (!ost->enc_ctx->subtitle_header)
2612  return AVERROR(ENOMEM);
2613  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2615  }
2616  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2617  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate to 128k when neither the codec nor the user set
 * one. */
2618  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2619  !codec->defaults &&
2620  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2621  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2622  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2623 
/* Hand the filtergraph's hardware frames context to the encoder.
 * NOTE(review): the reference-taking line (original 2625) is missing. */
2624  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2626  if (!ost->enc_ctx->hw_frames_ctx)
2627  return AVERROR(ENOMEM);
2628  }
2629 
2630  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2631  if (ret == AVERROR_EXPERIMENTAL)
2632  abort_codec_experimental(codec, 1);
2633  snprintf(error, error_len,
2634  "Error while opening encoder for output stream #%d:%d - "
2635  "maybe incorrect parameters such as bit_rate, rate, width or height",
2636  ost->file_index, ost->index);
2637  return ret;
2638  }
/* Fixed-frame-size audio encoders dictate the buffersink frame size.
 * NOTE(review): the call opener (original 2641) and lines 2643/2650 are
 * missing from this extracted listing. */
2639  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2640  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2642  ost->enc_ctx->frame_size);
2644  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2645  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2646  " It takes bits/s as argument, not kbits/s\n");
2647 
2648  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2649  if (ret < 0) {
2651  "Error initializing the output stream codec context.\n");
2652  exit_program(1);
2653  }
2654 
/* Copy encoder-produced coded side data onto the muxer stream.
 * NOTE(review): the side_data allocation opener (original 2658) is
 * missing from this extracted listing. */
2655  if (ost->enc_ctx->nb_coded_side_data) {
2656  int i;
2657 
2659  sizeof(*ost->st->side_data));
2660  if (!ost->st->side_data)
2661  return AVERROR(ENOMEM);
2662 
2663  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2664  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2665  AVPacketSideData *sd_dst = &ost->st->side_data[i];
2666 
2667  sd_dst->data = av_malloc(sd_src->size);
2668  if (!sd_dst->data)
2669  return AVERROR(ENOMEM);
2670  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2671  sd_dst->size = sd_src->size;
2672  sd_dst->type = sd_src->type;
2673  ost->st->nb_side_data++;
2674  }
2675  }
2676 
2677  // copy timebase while removing common factors
2678  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2679  ost->st->codec->codec= ost->enc_ctx->codec;
2680  } else if (ost->stream_copy) {
/* NOTE(review): the av_log opener for this error (original 2683) is
 * missing from this extracted listing. */
2681  ret = av_opt_set_dict(ost->st->codec, &ost->encoder_opts);
2682  if (ret < 0) {
2684  "Error setting up codec context options.\n");
2685  return ret;
2686  }
2687  // copy timebase while removing common factors
2688  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2689  }
2690 
2691  return ret;
2692 }
2693 
/*
 * Parse the -force_key_frames option value: a comma-separated list of time
 * specs, where an entry starting with "chapters" expands to one keyframe
 * per chapter (optionally shifted by the suffix after "chapters"). Fills
 * ost->forced_kf_pts (sorted ascending, in avctx->time_base units) and
 * ost->forced_kf_count. Mutates 'kf' in place (commas become NULs).
 * NOTE(review): the av_log opener on the realloc-failure path (original
 * line 2726) is missing from this extracted listing.
 */
2694 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2695  AVCodecContext *avctx)
2696 {
2697  char *p;
2698  int n = 1, i, size, index = 0;
2699  int64_t t, *pts;
2700 
/* First pass: count entries (commas + 1) to size the pts array. */
2701  for (p = kf; *p; p++)
2702  if (*p == ',')
2703  n++;
2704  size = n;
2705  pts = av_malloc_array(size, sizeof(*pts));
2706  if (!pts) {
2707  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2708  exit_program(1);
2709  }
2710 
/* Second pass: split on commas and convert each entry to a timestamp. */
2711  p = kf;
2712  for (i = 0; i < n; i++) {
2713  char *next = strchr(p, ',');
2714 
2715  if (next)
2716  *next++ = 0;
2717 
2718  if (!memcmp(p, "chapters", 8)) {
2719 
2720  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2721  int j;
2722 
/* Grow the array by (nb_chapters - 1): this entry becomes one timestamp
 * per chapter instead of one. */
2723  if (avf->nb_chapters > INT_MAX - size ||
2724  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2725  sizeof(*pts)))) {
2727  "Could not allocate forced key frames array.\n");
2728  exit_program(1);
2729  }
/* Optional offset after "chapters", e.g. "chapters-0.1". */
2730  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2731  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2732 
2733  for (j = 0; j < avf->nb_chapters; j++) {
2734  AVChapter *c = avf->chapters[j];
2735  av_assert1(index < size);
2736  pts[index++] = av_rescale_q(c->start, c->time_base,
2737  avctx->time_base) + t;
2738  }
2739 
2740  } else {
2741 
2742  t = parse_time_or_die("force_key_frames", p, 1);
2743  av_assert1(index < size);
2744  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2745 
2746  }
2747 
2748  p = next;
2749  }
2750 
/* Keyframe forcing expects the timestamps sorted ascending. */
2751  av_assert0(index == size);
2752  qsort(pts, size, sizeof(*pts), compare_int64);
2753  ost->forced_kf_count = size;
2754  ost->forced_kf_pts = pts;
2755 }
2756 
2757 static void report_new_stream(int input_index, AVPacket *pkt)
2758 {
2759  InputFile *file = input_files[input_index];
2760  AVStream *st = file->ctx->streams[pkt->stream_index];
2761 
2762  if (pkt->stream_index < file->nb_streams_warn)
2763  return;
2764  av_log(file->ctx, AV_LOG_WARNING,
2765  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2766  av_get_media_type_string(st->codec->codec_type),
2767  input_index, pkt->stream_index,
2768  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2769  file->nb_streams_warn = pkt->stream_index + 1;
2770 }
2771 
/*
 * set_encoder_id(): store an "encoder" metadata tag ("Lavc <name>" or the
 * full LIBAVCODEC_IDENT, depending on bitexact mode) on the output stream,
 * unless the user already supplied one.
 * NOTE(review): the signature line (original 2772) is missing from this
 * extracted listing — the call site passes (output_files[ost->file_index],
 * ost), and the body reads of->opts/of->ctx and ost — and the final
 * av_dict_set flags argument (original 2810) is also missing.
 */
2773 {
2774  AVDictionaryEntry *e;
2775 
2776  uint8_t *encoder_string;
2777  int encoder_string_len;
2778  int format_flags = 0;
2779  int codec_flags = 0;
2780 
/* Respect a user-provided encoder tag. */
2781  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2782  return;
2783 
/* Evaluate -fflags / codec -flags strings to detect bitexact mode. */
2784  e = av_dict_get(of->opts, "fflags", NULL, 0);
2785  if (e) {
2786  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2787  if (!o)
2788  return;
2789  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2790  }
2791  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2792  if (e) {
2793  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2794  if (!o)
2795  return;
2796  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2797  }
2798 
/* sizeof(LIBAVCODEC_IDENT) includes the NUL; +2 covers the separator. */
2799  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2800  encoder_string = av_mallocz(encoder_string_len);
2801  if (!encoder_string)
2802  exit_program(1);
2803 
/* In bitexact mode use the version-free "Lavc " prefix so output files are
 * reproducible. */
2804  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2805  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2806  else
2807  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2808  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2809  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2811 }
2812 
2813 static int transcode_init(void)
2814 {
2815  int ret = 0, i, j, k;
2816  AVFormatContext *oc;
2817  OutputStream *ost;
2818  InputStream *ist;
2819  char error[1024] = {0};
2820  int want_sdp = 1;
2821 
2822  for (i = 0; i < nb_filtergraphs; i++) {
2823  FilterGraph *fg = filtergraphs[i];
2824  for (j = 0; j < fg->nb_outputs; j++) {
2825  OutputFilter *ofilter = fg->outputs[j];
2826  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2827  continue;
2828  if (fg->nb_inputs != 1)
2829  continue;
2830  for (k = nb_input_streams-1; k >= 0 ; k--)
2831  if (fg->inputs[0]->ist == input_streams[k])
2832  break;
2833  ofilter->ost->source_index = k;
2834  }
2835  }
2836 
2837  /* init framerate emulation */
2838  for (i = 0; i < nb_input_files; i++) {
2839  InputFile *ifile = input_files[i];
2840  if (ifile->rate_emu)
2841  for (j = 0; j < ifile->nb_streams; j++)
2842  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2843  }
2844 
2845  /* for each output stream, we compute the right encoding parameters */
2846  for (i = 0; i < nb_output_streams; i++) {
2847  AVCodecContext *enc_ctx;
2849  ost = output_streams[i];
2850  oc = output_files[ost->file_index]->ctx;
2851  ist = get_input_stream(ost);
2852 
2853  if (ost->attachment_filename)
2854  continue;
2855 
2856  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2857 
2858  if (ist) {
2859  dec_ctx = ist->dec_ctx;
2860 
2861  ost->st->disposition = ist->st->disposition;
2862  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2863  } else {
2864  for (j=0; j<oc->nb_streams; j++) {
2865  AVStream *st = oc->streams[j];
2866  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2867  break;
2868  }
2869  if (j == oc->nb_streams)
2870  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2872  }
2873 
2874  if (ost->stream_copy) {
2875  AVRational sar;
2876  uint64_t extra_size;
2877 
2878  av_assert0(ist && !ost->filter);
2879 
2880  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2881 
2882  if (extra_size > INT_MAX) {
2883  return AVERROR(EINVAL);
2884  }
2885 
2886  /* if stream_copy is selected, no need to decode or encode */
2887  enc_ctx->codec_id = dec_ctx->codec_id;
2888  enc_ctx->codec_type = dec_ctx->codec_type;
2889 
2890  if (!enc_ctx->codec_tag) {
2891  unsigned int codec_tag;
2892  if (!oc->oformat->codec_tag ||
2893  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2894  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2895  enc_ctx->codec_tag = dec_ctx->codec_tag;
2896  }
2897 
2898  enc_ctx->bit_rate = dec_ctx->bit_rate;
2899  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2900  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2901  enc_ctx->field_order = dec_ctx->field_order;
2902  if (dec_ctx->extradata_size) {
2903  enc_ctx->extradata = av_mallocz(extra_size);
2904  if (!enc_ctx->extradata) {
2905  return AVERROR(ENOMEM);
2906  }
2907  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2908  }
2909  enc_ctx->extradata_size= dec_ctx->extradata_size;
2910  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2911  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2912 
2913  enc_ctx->time_base = ist->st->time_base;
2914  /*
2915  * Avi is a special case here because it supports variable fps but
2916  * having the fps and timebase differe significantly adds quite some
2917  * overhead
2918  */
2919  if(!strcmp(oc->oformat->name, "avi")) {
2920  if ( copy_tb<0 && ist->st->r_frame_rate.num
2921  && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2922  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2923  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2924  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2925  || copy_tb==2){
2926  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2927  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2928  enc_ctx->ticks_per_frame = 2;
2929  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2930  && av_q2d(ist->st->time_base) < 1.0/500
2931  || copy_tb==0){
2932  enc_ctx->time_base = dec_ctx->time_base;
2933  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2934  enc_ctx->time_base.den *= 2;
2935  enc_ctx->ticks_per_frame = 2;
2936  }
2937  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2938  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2939  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2940  && strcmp(oc->oformat->name, "f4v")
2941  ) {
2942  if( copy_tb<0 && dec_ctx->time_base.den
2943  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2944  && av_q2d(ist->st->time_base) < 1.0/500
2945  || copy_tb==0){
2946  enc_ctx->time_base = dec_ctx->time_base;
2947  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2948  }
2949  }
2950  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2951  && dec_ctx->time_base.num < dec_ctx->time_base.den
2952  && dec_ctx->time_base.num > 0
2953  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2954  enc_ctx->time_base = dec_ctx->time_base;
2955  }
2956 
2957  if (!ost->frame_rate.num)
2958  ost->frame_rate = ist->framerate;
2959  if(ost->frame_rate.num)
2960  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2961 
2962  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2963  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2964 
2965  if (ist->st->nb_side_data) {
2967  sizeof(*ist->st->side_data));
2968  if (!ost->st->side_data)
2969  return AVERROR(ENOMEM);
2970 
2971  ost->st->nb_side_data = 0;
2972  for (j = 0; j < ist->st->nb_side_data; j++) {
2973  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2974  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2975 
2976  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2977  continue;
2978 
2979  sd_dst->data = av_malloc(sd_src->size);
2980  if (!sd_dst->data)
2981  return AVERROR(ENOMEM);
2982  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2983  sd_dst->size = sd_src->size;
2984  sd_dst->type = sd_src->type;
2985  ost->st->nb_side_data++;
2986  }
2987  }
2988 
2989  ost->parser = av_parser_init(enc_ctx->codec_id);
2990 
2991  switch (enc_ctx->codec_type) {
2992  case AVMEDIA_TYPE_AUDIO:
2993  if (audio_volume != 256) {
2994  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2995  exit_program(1);
2996  }
2997  enc_ctx->channel_layout = dec_ctx->channel_layout;
2998  enc_ctx->sample_rate = dec_ctx->sample_rate;
2999  enc_ctx->channels = dec_ctx->channels;
3000  enc_ctx->frame_size = dec_ctx->frame_size;
3001  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3002  enc_ctx->block_align = dec_ctx->block_align;
3003  enc_ctx->initial_padding = dec_ctx->delay;
3004  enc_ctx->profile = dec_ctx->profile;
3005 #if FF_API_AUDIOENC_DELAY
3006  enc_ctx->delay = dec_ctx->delay;
3007 #endif
3008  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3009  enc_ctx->block_align= 0;
3010  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3011  enc_ctx->block_align= 0;
3012  break;
3013  case AVMEDIA_TYPE_VIDEO:
3014  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3015  enc_ctx->colorspace = dec_ctx->colorspace;
3016  enc_ctx->color_range = dec_ctx->color_range;
3017  enc_ctx->color_primaries = dec_ctx->color_primaries;
3018  enc_ctx->color_trc = dec_ctx->color_trc;
3019  enc_ctx->width = dec_ctx->width;
3020  enc_ctx->height = dec_ctx->height;
3021  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3022  enc_ctx->profile = dec_ctx->profile;
3023  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3024  sar =
3026  (AVRational){ enc_ctx->height, enc_ctx->width });
3027  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3028  "with stream copy may produce invalid files\n");
3029  }
3030  else if (ist->st->sample_aspect_ratio.num)
3031  sar = ist->st->sample_aspect_ratio;
3032  else
3033  sar = dec_ctx->sample_aspect_ratio;
3034  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3035  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3036  ost->st->r_frame_rate = ist->st->r_frame_rate;
3037  break;
3038  case AVMEDIA_TYPE_SUBTITLE:
3039  enc_ctx->width = dec_ctx->width;
3040  enc_ctx->height = dec_ctx->height;
3041  break;
3042  case AVMEDIA_TYPE_UNKNOWN:
3043  case AVMEDIA_TYPE_DATA:
3045  break;
3046  default:
3047  abort();
3048  }
3049  } else {
3050  set_encoder_id(output_files[ost->file_index], ost);
3051 
3052 #if CONFIG_LIBMFX
3053  if (qsv_transcode_init(ost))
3054  exit_program(1);
3055 #endif
3056 
3057 #if CONFIG_CUVID
3058  if (cuvid_transcode_init(ost))
3059  exit_program(1);
3060 #endif
3061 
3062  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3063  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3065  FilterGraph *fg = ost->filter->graph;
3066  if (configure_filtergraph(fg)) {
3067  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3068  exit_program(1);
3069  }
3070  }
3071 
3072  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3073  if (!ost->frame_rate.num)
3075  if (ist && !ost->frame_rate.num)
3076  ost->frame_rate = ist->framerate;
3077  if (ist && !ost->frame_rate.num)
3078  ost->frame_rate = ist->st->r_frame_rate;
3079  if (ist && !ost->frame_rate.num) {
3080  ost->frame_rate = (AVRational){25, 1};
3082  "No information "
3083  "about the input framerate is available. Falling "
3084  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3085  "if you want a different framerate.\n",
3086  ost->file_index, ost->index);
3087  }
3088 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3089  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3090  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3091  ost->frame_rate = ost->enc->supported_framerates[idx];
3092  }
3093  // reduce frame rate for mpeg4 to be within the spec limits
3094  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3095  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3096  ost->frame_rate.num, ost->frame_rate.den, 65535);
3097  }
3098  }
3099 
3100  switch (enc_ctx->codec_type) {
3101  case AVMEDIA_TYPE_AUDIO:
3102  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3103  if (dec_ctx)
3104  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3105  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3106  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3107  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3108  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3109  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3110  break;
3111  case AVMEDIA_TYPE_VIDEO:
3112  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3113  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3114  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3115  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3117  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3118  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3119  }
3120  for (j = 0; j < ost->forced_kf_count; j++)
3121  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3123  enc_ctx->time_base);
3124 
3125  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3126  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3127  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3128  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3129  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3131  if (!strncmp(ost->enc->name, "libx264", 7) &&
3132  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3135  "No pixel format specified, %s for H.264 encoding chosen.\n"
3136  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3138  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3139  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3142  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3143  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3145  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3146  if (dec_ctx)
3147  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3148  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3149 
3150  ost->st->avg_frame_rate = ost->frame_rate;
3151 
3152  if (!dec_ctx ||
3153  enc_ctx->width != dec_ctx->width ||
3154  enc_ctx->height != dec_ctx->height ||
3155  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3156  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3157  }
3158 
3159  if (ost->forced_keyframes) {
3160  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3163  if (ret < 0) {
3165  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3166  return ret;
3167  }
3172 
3173  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3174  // parse it only for static kf timings
3175  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3177  }
3178  }
3179  break;
3180  case AVMEDIA_TYPE_SUBTITLE:
3181  enc_ctx->time_base = (AVRational){1, 1000};
3182  if (!enc_ctx->width) {
3183  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3184  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3185  }
3186  break;
3187  case AVMEDIA_TYPE_DATA:
3188  break;
3189  default:
3190  abort();
3191  break;
3192  }
3193  }
3194 
3195  if (ost->disposition) {
3196  static const AVOption opts[] = {
3197  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3198  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3199  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3200  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3201  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3202  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3203  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3204  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3205  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3206  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3207  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3208  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3209  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3210  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3211  { NULL },
3212  };
3213  static const AVClass class = {
3214  .class_name = "",
3215  .item_name = av_default_item_name,
3216  .option = opts,
3217  .version = LIBAVUTIL_VERSION_INT,
3218  };
3219  const AVClass *pclass = &class;
3220 
3221  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3222  if (ret < 0)
3223  goto dump_format;
3224  }
3225  }
3226 
3227  /* init input streams */
3228  for (i = 0; i < nb_input_streams; i++)
3229  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3230  for (i = 0; i < nb_output_streams; i++) {
3231  ost = output_streams[i];
3232  avcodec_close(ost->enc_ctx);
3233  }
3234  goto dump_format;
3235  }
3236 
3237  /* open each encoder */
3238  for (i = 0; i < nb_output_streams; i++) {
3239  ret = init_output_stream(output_streams[i], error, sizeof(error));
3240  if (ret < 0)
3241  goto dump_format;
3242  }
3243 
3244  /* discard unused programs */
3245  for (i = 0; i < nb_input_files; i++) {
3246  InputFile *ifile = input_files[i];
3247  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3248  AVProgram *p = ifile->ctx->programs[j];
3249  int discard = AVDISCARD_ALL;
3250 
3251  for (k = 0; k < p->nb_stream_indexes; k++)
3252  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3253  discard = AVDISCARD_DEFAULT;
3254  break;
3255  }
3256  p->discard = discard;
3257  }
3258  }
3259 
3260  /* open files and write file headers */
3261  for (i = 0; i < nb_output_files; i++) {
3262  oc = output_files[i]->ctx;
3263  oc->interrupt_callback = int_cb;
3264  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3265  snprintf(error, sizeof(error),
3266  "Could not write header for output file #%d "
3267  "(incorrect codec parameters ?): %s",
3268  i, av_err2str(ret));
3269  ret = AVERROR(EINVAL);
3270  goto dump_format;
3271  }
3272 // assert_avoptions(output_files[i]->opts);
3273  if (strcmp(oc->oformat->name, "rtp")) {
3274  want_sdp = 0;
3275  }
3276  }
3277 
3278  dump_format:
3279  /* dump the file output parameters - cannot be done before in case
3280  of stream copy */
3281  for (i = 0; i < nb_output_files; i++) {
3282  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3283  }
3284 
3285  /* dump the stream mapping */
3286  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3287  for (i = 0; i < nb_input_streams; i++) {
3288  ist = input_streams[i];
3289 
3290  for (j = 0; j < ist->nb_filters; j++) {
3291  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3292  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3293  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3294  ist->filters[j]->name);
3295  if (nb_filtergraphs > 1)
3296  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3297  av_log(NULL, AV_LOG_INFO, "\n");
3298  }
3299  }
3300  }
3301 
3302  for (i = 0; i < nb_output_streams; i++) {
3303  ost = output_streams[i];
3304 
3305  if (ost->attachment_filename) {
3306  /* an attached file */
3307  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3308  ost->attachment_filename, ost->file_index, ost->index);
3309  continue;
3310  }
3311 
3312  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3313  /* output from a complex graph */
3314  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3315  if (nb_filtergraphs > 1)
3316  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3317 
3318  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3319  ost->index, ost->enc ? ost->enc->name : "?");
3320  continue;
3321  }
3322 
3323  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3324  input_streams[ost->source_index]->file_index,
3325  input_streams[ost->source_index]->st->index,
3326  ost->file_index,
3327  ost->index);
3328  if (ost->sync_ist != input_streams[ost->source_index])
3329  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3330  ost->sync_ist->file_index,
3331  ost->sync_ist->st->index);
3332  if (ost->stream_copy)
3333  av_log(NULL, AV_LOG_INFO, " (copy)");
3334  else {
3335  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3336  const AVCodec *out_codec = ost->enc;
3337  const char *decoder_name = "?";
3338  const char *in_codec_name = "?";
3339  const char *encoder_name = "?";
3340  const char *out_codec_name = "?";
3341  const AVCodecDescriptor *desc;
3342 
3343  if (in_codec) {
3344  decoder_name = in_codec->name;
3345  desc = avcodec_descriptor_get(in_codec->id);
3346  if (desc)
3347  in_codec_name = desc->name;
3348  if (!strcmp(decoder_name, in_codec_name))
3349  decoder_name = "native";
3350  }
3351 
3352  if (out_codec) {
3353  encoder_name = out_codec->name;
3354  desc = avcodec_descriptor_get(out_codec->id);
3355  if (desc)
3356  out_codec_name = desc->name;
3357  if (!strcmp(encoder_name, out_codec_name))
3358  encoder_name = "native";
3359  }
3360 
3361  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3362  in_codec_name, decoder_name,
3363  out_codec_name, encoder_name);
3364  }
3365  av_log(NULL, AV_LOG_INFO, "\n");
3366  }
3367 
3368  if (ret) {
3369  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3370  return ret;
3371  }
3372 
3373  if (sdp_filename || want_sdp) {
3374  print_sdp();
3375  }
3376 
3377  transcode_init_done = 1;
3378 
3379  return 0;
3380 }
3381 
3382 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3383 static int need_output(void)
3384 {
3385  int i;
3386 
3387  for (i = 0; i < nb_output_streams; i++) {
3388  OutputStream *ost = output_streams[i];
3389  OutputFile *of = output_files[ost->file_index];
3390  AVFormatContext *os = output_files[ost->file_index]->ctx;
3391 
3392  if (ost->finished ||
3393  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3394  continue;
3395  if (ost->frame_number >= ost->max_frames) {
3396  int j;
3397  for (j = 0; j < of->ctx->nb_streams; j++)
3398  close_output_stream(output_streams[of->ost_index + j]);
3399  continue;
3400  }
3401 
3402  return 1;
3403  }
3404 
3405  return 0;
3406 }
3407 
3408 /**
3409  * Select the output stream to process.
3410  *
3411  * @return selected output stream, or NULL if none available
3412  */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* stream position in microseconds; a stream with no dts yet sorts
         * first (INT64_MIN) so it gets a chance to produce output */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        /* pick the least-advanced unfinished stream; if that stream is
         * currently unavailable, return NULL so the caller backs off */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3434 
/* Enable (on != 0) or disable terminal echo on stdin; a silent no-op when
 * termios is unavailable or fd 0 is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;
    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3446 
{
    /* Poll the interactive keyboard (at most every 100ms) and act on the
     * pressed key: quit, verbosity, QP histogram, packet dumping, filter
     * commands, debug flags, or help. Returns AVERROR_EXIT to stop
     * transcoding, 0 otherwise. */
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        /* cycle: off -> packet dump -> packet+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
    }
    if (key == 'c' || key == 'C'){
        /* read "<target>|all <time>|-1 <command>[ <argument>]" from the tty
         * and forward it to the filtergraphs; 'c' sends to the first matching
         * filter only, 'C' to all matching filters */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* time < 0: execute the command immediately */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* queue the command for execution at the given time */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log() call opening this message appears
             * truncated in this listing */
            "Parse error, at least 3 arguments were expected, "
            "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        /* 'D' cycles through debug modes; 'd' reads a numeric value from tty */
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* apply the chosen debug flags to every decoder and encoder context */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Que command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
3559 
3560 #if HAVE_PTHREADS
/* Per-input-file reader thread: pulls packets off the demuxer and pushes them
 * into the file's thread message queue for the main thread to consume. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* start non-blocking if requested; dropped to blocking once the queue
     * first fills up */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000); /* demuxer has nothing yet; retry shortly */
            continue;
        }
        if (ret < 0) {
            /* propagate EOF/error to the receiving side and stop reading */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full: fall back to blocking sends from now on.
             * NOTE(review): the av_log() call opening the message below
             * appears truncated in this listing */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            "Thread message queue blocking; consider raising the "
            "thread_queue_size option (current value: %d)\n",
            f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            /* the packet was not handed off; release it before bailing out */
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
3601 
/* Tear down all input reader threads: drain each file's message queue,
 * join the thread and free the queue. */
static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        /* nothing to do for files that never got a reader thread */
        if (!f || !f->in_thread_queue)
            continue;
        /* unref any packets still queued so the reader thread can finish */
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}
3621 
3622 static int init_input_threads(void)
3623 {
3624  int i, ret;
3625 
3626  if (nb_input_files == 1)
3627  return 0;
3628 
3629  for (i = 0; i < nb_input_files; i++) {
3630  InputFile *f = input_files[i];
3631 
3632  if (f->ctx->pb ? !f->ctx->pb->seekable :
3633  strcmp(f->ctx->iformat->name, "lavfi"))
3634  f->non_blocking = 1;
3635  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3636  f->thread_queue_size, sizeof(AVPacket));
3637  if (ret < 0)
3638  return ret;
3639 
3640  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3641  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3642  av_thread_message_queue_free(&f->in_thread_queue);
3643  return AVERROR(ret);
3644  }
3645  }
3646  return 0;
3647 }
3648 
/* Fetch the next packet produced by this file's reader thread.
 * NOTE(review): the tail of this call appears truncated in this listing;
 * per input_thread() above it presumably passes AV_THREAD_MESSAGE_NONBLOCK
 * when f->non_blocking is set — confirm against the full source. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
}
3655 #endif
3656 
{
    /* with -re (rate emulation), refuse to deliver a packet until wall-clock
     * time has caught up with the stream's dts */
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_PTHREADS
    /* with several inputs the packets come from per-file reader threads */
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
3676 
3677 static int got_eagain(void)
3678 {
3679  int i;
3680  for (i = 0; i < nb_output_streams; i++)
3681  if (output_streams[i]->unavailable)
3682  return 1;
3683  return 0;
3684 }
3685 
3686 static void reset_eagain(void)
3687 {
3688  int i;
3689  for (i = 0; i < nb_input_files; i++)
3690  input_files[i]->eagain = 0;
3691  for (i = 0; i < nb_output_streams; i++)
3692  output_streams[i]->unavailable = 0;
3693 }
3694 
3695 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3696 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3697  AVRational time_base)
3698 {
3699  int ret;
3700 
3701  if (!*duration) {
3702  *duration = tmp;
3703  return tmp_time_base;
3704  }
3705 
3706  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3707  if (ret < 0) {
3708  *duration = tmp;
3709  return tmp_time_base;
3710  }
3711 
3712  return time_base;
3713 }
3714 
/* Rewind an input file (used by -stream_loop): seek back to its start time,
 * flush all active decoders, and record the duration of the iteration just
 * played in ifile->duration so the next iteration's timestamps can be
 * offset past it (see process_input()). */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* estimate the duration of the last frame of this stream */
        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        /* keep the largest per-stream duration as the file's loop offset */
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
3775 
/*
 * Read one packet from the input file selected by file_index, fix up its
 * timestamps (ts offset, wrap correction, discontinuity handling, looping
 * offset) and hand it to process_input_packet().
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;
    int64_t duration;
    int64_t pkt_dts;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* -stream_loop: on EOF/error rewind the file and try once more */
    if (ret < 0 && ifile->loop) {
        if ((ret = seek_to_start(ifile, is)) < 0)
            return ret;
        ret = get_input_packet(ifile, &pkt);
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }

        /* real EOF: flush each decoder and finish the outputs that do not
         * go through a filtergraph */
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): the av_pkt_dump call opening this statement appears
         * truncated in this listing */
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* bookkeeping for the progress/statistics report */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
        exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* undo a timestamp wrap-around once per stream, for containers whose
     * timestamps use fewer than 64 bits */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        if (ist->st->nb_side_data)
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            /* do not overwrite side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;
            /* the display matrix is consumed by autorotation instead */
            if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the input file's timestamp offset, then the -itsscale factor */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* inter-stream discontinuity handling for AVFMT_TS_DISCONT formats.
     * NOTE(review): the assignment of pkt_dts and part of the condition
     * appear truncated in this listing */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta   = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
            delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* offset timestamps by the accumulated -stream_loop duration */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* per-stream discontinuity / bogus-timestamp handling.
     * NOTE(review): parts of this condition appear truncated in this listing */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            /* formats with expected discontinuities: shift the offset */
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* other formats: drop timestamps beyond the error threshold */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4023 
4024 /**
4025  * Perform a step of transcoding for the specified filter graph.
4026  *
4027  * @param[in] graph filter graph to consider
4028  * @param[out] best_ist input stream where a frame would allow to continue
4029  * @return 0 for success, <0 for error
4030  */
4031 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4032 {
4033  int i, ret;
4034  int nb_requests, nb_requests_max = 0;
4035  InputFilter *ifilter;
4036  InputStream *ist;
4037 
4038  *best_ist = NULL;
4039  ret = avfilter_graph_request_oldest(graph->graph);
4040  if (ret >= 0)
4041  return reap_filters(0);
4042 
4043  if (ret == AVERROR_EOF) {
4044  ret = reap_filters(1);
4045  for (i = 0; i < graph->nb_outputs; i++)
4046  close_output_stream(graph->outputs[i]->ost);
4047  return ret;
4048  }
4049  if (ret != AVERROR(EAGAIN))
4050  return ret;
4051 
4052  for (i = 0; i < graph->nb_inputs; i++) {
4053  ifilter = graph->inputs[i];
4054  ist = ifilter->ist;
4055  if (input_files[ist->file_index]->eagain ||
4056  input_files[ist->file_index]->eof_reached)
4057  continue;
4058  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4059  if (nb_requests > nb_requests_max) {
4060  nb_requests_max = nb_requests;
4061  *best_ist = ist;
4062  }
4063  }
4064 
4065  if (!*best_ist)
4066  for (i = 0; i < graph->nb_outputs; i++)
4067  graph->outputs[i]->ost->unavailable = 1;
4068 
4069  return 0;
4070 }
4071 
4072 /**
4073  * Run a single step of transcoding.
4074  *
4075  * @return 0 for success, <0 for error
4076  */
4077 static int transcode_step(void)
4078 {
4079  OutputStream *ost;
4080  InputStream *ist;
4081  int ret;
4082 
4083  ost = choose_output();
4084  if (!ost) {
4085  if (got_eagain()) {
4086  reset_eagain();
4087  av_usleep(10000);
4088  return 0;
4089  }
4090  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4091  return AVERROR_EOF;
4092  }
4093 
4094  if (ost->filter) {
4095  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4096  return ret;
4097  if (!ist)
4098  return 0;
4099  } else {
4100  av_assert0(ost->source_index >= 0);
4101  ist = input_streams[ost->source_index];
4102  }
4103 
4104  ret = process_input(ist->file_index);
4105  if (ret == AVERROR(EAGAIN)) {
4106  if (input_files[ist->file_index]->eagain)
4107  ost->unavailable = 1;
4108  return 0;
4109  }
4110 
4111  if (ret < 0)
4112  return ret == AVERROR_EOF ? 0 : ret;
4113 
4114  return reap_filters(0);
4115 }
4116 
4117 /*
4118  * The following code is the main loop of the file converter
4119  */
4120 static int transcode(void)
4121 {
4122  int ret, i;
4123  AVFormatContext *os;
4124  OutputStream *ost;
4125  InputStream *ist;
4126  int64_t timer_start;
4127  int64_t total_packets_written = 0;
4128 
4129  ret = transcode_init();
4130  if (ret < 0)
4131  goto fail;
4132 
4133  if (stdin_interaction) {
4134  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4135  }
4136 
4137  timer_start = av_gettime_relative();
4138 
4139 #if HAVE_PTHREADS
4140  if ((ret = init_input_threads()) < 0)
4141  goto fail;
4142 #endif
4143 
4144  while (!received_sigterm) {
4145  int64_t cur_time= av_gettime_relative();
4146 
4147  /* if 'q' pressed, exits */
4148  if (stdin_interaction)
4149  if (check_keyboard_interaction(cur_time) < 0)
4150  break;
4151 
4152  /* check if there's any stream where output is still needed */
4153  if (!need_output()) {
4154  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4155  break;
4156  }
4157 
4158  ret = transcode_step();
4159  if (ret < 0 && ret != AVERROR_EOF) {
4160  char errbuf[128];
4161  av_strerror(ret, errbuf, sizeof(errbuf));
4162 
4163  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4164  break;
4165  }
4166 
4167  /* dump report by using the output first video and audio streams */
4168  print_report(0, timer_start, cur_time);
4169  }
4170 #if HAVE_PTHREADS
4171  free_input_threads();
4172 #endif
4173 
4174  /* at the end of stream, we must flush the decoder buffers */
4175  for (i = 0; i < nb_input_streams; i++) {
4176  ist = input_streams[i];
4177  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4178  process_input_packet(ist, NULL, 0);
4179  }
4180  }
4181  flush_encoders();
4182 
4183  term_exit();
4184 
4185  /* write the trailer if needed and close file */
4186  for (i = 0; i < nb_output_files; i++) {
4187  os = output_files[i]->ctx;
4188  if ((ret = av_write_trailer(os)) < 0) {
4189  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4190  if (exit_on_error)
4191  exit_program(1);
4192  }
4193  }
4194 
4195  /* dump report by using the first video and audio streams */
4196  print_report(1, timer_start, av_gettime_relative());
4197 
4198  /* close each encoder */
4199  for (i = 0; i < nb_output_streams; i++) {
4200  ost = output_streams[i];
4201  if (ost->encoding_needed) {
4202  av_freep(&ost->enc_ctx->stats_in);
4203  }
4204  total_packets_written += ost->packets_written;
4205  }
4206 
4207  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4208  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4209  exit_program(1);
4210  }
4211 
4212  /* close each decoder */
4213  for (i = 0; i < nb_input_streams; i++) {
4214  ist = input_streams[i];
4215  if (ist->decoding_needed) {
4216  avcodec_close(ist->dec_ctx);
4217  if (ist->hwaccel_uninit)
4218  ist->hwaccel_uninit(ist->dec_ctx);
4219  }
4220  }
4221 
4223 
4224  /* finished ! */
4225  ret = 0;
4226 
4227  fail:
4228 #if HAVE_PTHREADS
4229  free_input_threads();
4230 #endif
4231 
4232  if (output_streams) {
4233  for (i = 0; i < nb_output_streams; i++) {
4234  ost = output_streams[i];
4235  if (ost) {
4236  if (ost->logfile) {
4237  if (fclose(ost->logfile))
4239  "Error closing logfile, loss of information possible: %s\n",
4240  av_err2str(AVERROR(errno)));
4241  ost->logfile = NULL;
4242  }
4243  av_freep(&ost->forced_kf_pts);
4244  av_freep(&ost->apad);
4245  av_freep(&ost->disposition);
4246  av_dict_free(&ost->encoder_opts);
4247  av_dict_free(&ost->sws_dict);
4248  av_dict_free(&ost->swr_opts);
4249  av_dict_free(&ost->resample_opts);
4250  }
4251  }
4252  }
4253  return ret;
4254 }
4255 
4256 
/**
 * Return the user CPU time consumed by this process, in microseconds.
 * Falls back to a wall-clock timestamp on platforms without a
 * per-process accounting API.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec * 1000000LL + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME ct, et, kt, ut;
    HANDLE self = GetCurrentProcess();

    GetProcessTimes(self, &ct, &et, &kt, &ut);
    /* FILETIME counts 100ns ticks; divide by 10 for microseconds. */
    return ((int64_t)ut.dwHighDateTime << 32 | ut.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4274 
/**
 * Return the peak memory usage of this process in bytes, or 0 when the
 * platform offers no way to query it.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4292 
/* Log callback that discards all messages; installed when running as a
 * daemon so nothing is written to the terminal. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
    (void)ptr;
    (void)level;
    (void)fmt;
    (void)vl;
}
4296 
4297 int main(int argc, char **argv)
4298 {
4299  int ret;
4300  int64_t ti;
4301 
4302  init_dynload();
4303 
4305 
4306  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4307 
4309  parse_loglevel(argc, argv, options);
4310 
4311  if(argc>1 && !strcmp(argv[1], "-d")){
4312  run_as_daemon=1;
4314  argc--;
4315  argv++;
4316  }
4317 
4319 #if CONFIG_AVDEVICE
4321 #endif
4323  av_register_all();
4325 
4326  show_banner(argc, argv, options);
4327 
4328  term_init();
4329 
4330  /* parse options and open all input/output files */
4331  ret = ffmpeg_parse_options(argc, argv);
4332  if (ret < 0)
4333  exit_program(1);
4334 
4335  if (nb_output_files <= 0 && nb_input_files == 0) {
4336  show_usage();
4337  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4338  exit_program(1);
4339  }
4340 
4341  /* file converter / grab */
4342  if (nb_output_files <= 0) {
4343  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4344  exit_program(1);
4345  }
4346 
4347 // if (nb_input_files == 0) {
4348 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4349 // exit_program(1);
4350 // }
4351 
4352  current_time = ti = getutime();
4353  if (transcode() < 0)
4354  exit_program(1);
4355  ti = getutime() - ti;
4356  if (do_benchmark) {
4357  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4358  }
4359  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4362  exit_program(69);
4363 
4365  return main_return_code;
4366 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1530
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:279
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:894
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:110
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2922
int got_output
Definition: ffmpeg.h:308
#define AV_DISPOSITION_METADATA
Definition: avformat.h:862
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1798
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1049
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1911
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:409
const struct AVCodec * codec
Definition: avcodec.h:1668
Definition: ffmpeg.h:386
AVRational framerate
Definition: avcodec.h:3358
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:335
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:499
void term_init(void)
Definition: ffmpeg.c:366
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:238
int nb_outputs
Definition: ffmpeg.h:255
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:461
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:265
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2263
int resample_channels
Definition: ffmpeg.h:303
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:308
int stream_copy
Definition: ffmpeg.h:466
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1147
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3895
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1579
AVOption.
Definition: opt.h:245
AVRational frame_rate
Definition: ffmpeg.h:431
int64_t * forced_kf_pts
Definition: ffmpeg.h:440
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
Immediately push the frame to the output.
Definition: buffersrc.h:47
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:288
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2998
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:456
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:873
#define CODEC_FLAG_PASS2
Definition: avcodec.h:1079
static int process_input(int file_index)
Definition: ffmpeg.c:3783
int exit_on_error
Definition: ffmpeg_opt.c:115
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2597
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1724
#define LIBAVUTIL_VERSION_INT
Definition: version.h:70
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1610
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:127
Memory buffer source API.
const char * desc
Definition: nvenc.c:89
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:292
AVCodecParserContext * parser
Definition: ffmpeg.h:474
static int64_t cur_time
Definition: ffserver.c:262
int64_t max_pts
Definition: ffmpeg.h:285
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2396
int decoding_needed
Definition: ffmpeg.h:263
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:945
int num
Numerator.
Definition: rational.h:59
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1507
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:435
int index
stream index in AVFormatContext
Definition: avformat.h:879
int size
Definition: avcodec.h:1591
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4275
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:128
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2585
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:607
#define AV_DISPOSITION_DUB
Definition: avformat.h:832
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2070
int eagain
Definition: ffmpeg.h:357
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
attribute_deprecated int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1775
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1142
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1887
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:419
attribute_deprecated int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2280
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:604
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:844
int quality
Definition: ffmpeg.h:486
unsigned num_rects
Definition: avcodec.h:3933
AVFrame * filter_frame
Definition: ffmpeg.h:270
static int transcode_init(void)
Definition: ffmpeg.c:2813
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2592
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2283
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2836
int do_benchmark_all
Definition: ffmpeg_opt.c:108
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:846
int last_dropped
Definition: ffmpeg.h:425
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:658
discard all
Definition: avcodec.h:786
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:985
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:338
int64_t input_ts_offset
Definition: ffmpeg.h:363
int do_hex_dump
Definition: ffmpeg_opt.c:109
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3060
int nb_input_streams
Definition: ffmpeg.c:138
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:61
const char * name
Definition: ffmpeg.h:73
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)