FFmpeg
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
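/* These are the variable names available to a -force_key_frames "expr:"
   expression: n is the frame number, t the frame time in seconds, n_forced
   the number of keyframes forced so far, and prev_forced_n / prev_forced_t
   the frame number and time of the previously forced keyframe. */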
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
134 
135 static uint8_t *subtitle_out;
136 
137 InputStream **input_streams = NULL;
138 int        nb_input_streams = 0;
139 InputFile   **input_files   = NULL;
140 int        nb_input_files   = 0;
141 
142 OutputStream **output_streams = NULL;
143 int         nb_output_streams = 0;
144 OutputFile   **output_files   = NULL;
145 int         nb_output_files   = 0;
146 
147 FilterGraph **filtergraphs;
148 int        nb_filtergraphs;
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
165 
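/* Rough data flow, as implemented below: when a subtitle is decoded,
   sub2video_update() paints its rectangles onto a blank RGB32 frame and
   sub2video_push_ref() pushes that frame into the buffer source of every
   filtergraph fed by this input stream; sub2video_heartbeat() re-sends the
   last frame so that video filters waiting on the subtitle input (such as
   overlay) do not stall between subtitle events. */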
166 static int sub2video_get_blank_frame(InputStream *ist)
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
172  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194  r->x, r->y, r->w, r->h, w, h
195  );
196  return;
197  }
198 
199  dst += r->y * dst_linesize + r->x * 4;
200  src = r->pict.data[0];
201  pal = (uint32_t *)r->pict.data[1];
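 /* The rectangle uses PAL8 data: data[0] holds 8-bit palette indices and
    data[1] the palette, so each source byte is expanded below into a 32-bit
    RGBA pixel by a palette lookup. */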
202  for (y = 0; y < r->h; y++) {
203  dst2 = (uint32_t *)dst;
204  src2 = src;
205  for (x = 0; x < r->w; x++)
206  *(dst2++) = pal[*(src2++)];
207  dst += dst_linesize;
208  src += r->pict.linesize[0];
209  }
210 }
211 
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214  AVFrame *frame = ist->sub2video.frame;
215  int i;
216 
217  av_assert1(frame->data[0]);
218  ist->sub2video.last_pts = frame->pts = pts;
219  for (i = 0; i < ist->nb_filters; i++)
220  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221  AV_BUFFERSRC_FLAG_KEEP_REF |
222  AV_BUFFERSRC_FLAG_PUSH);
223 }
224 
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227  AVFrame *frame = ist->sub2video.frame;
228  int8_t *dst;
229  int dst_linesize;
230  int num_rects, i;
231  int64_t pts, end_pts;
232 
233  if (!frame)
234  return;
235  if (sub) {
236  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  num_rects = sub->num_rects;
241  } else {
242  pts = ist->sub2video.end_pts;
243  end_pts = INT64_MAX;
244  num_rects = 0;
245  }
246  if (sub2video_get_blank_frame(ist) < 0) {
247  av_log(ist->dec_ctx, AV_LOG_ERROR,
248  "Impossible to get a blank canvas.\n");
249  return;
250  }
251  dst = frame->data [0];
252  dst_linesize = frame->linesize[0];
253  for (i = 0; i < num_rects; i++)
254  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255  sub2video_push_ref(ist, pts);
256  ist->sub2video.end_pts = end_pts;
257 }
258 
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261  InputFile *infile = input_files[ist->file_index];
262  int i, j, nb_reqs;
263  int64_t pts2;
264 
265  /* When a frame is read from a file, examine all sub2video streams in
266  the same file and send the sub2video frame again. Otherwise, decoded
267  video frames could be accumulating in the filter graph while a filter
268  (possibly overlay) is desperately waiting for a subtitle frame. */
269  for (i = 0; i < infile->nb_streams; i++) {
270  InputStream *ist2 = input_streams[infile->ist_index + i];
271  if (!ist2->sub2video.frame)
272  continue;
273  /* subtitles seem to be usually muxed ahead of other streams;
274  if not, subtracting a larger time here is necessary */
275  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276  /* do not send the heartbeat frame if the subtitle is already ahead */
277  if (pts2 <= ist2->sub2video.last_pts)
278  continue;
279  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280  sub2video_update(ist2, NULL);
281  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283  if (nb_reqs)
284  sub2video_push_ref(ist2, pts2);
285  }
286 }
287 
288 static void sub2video_flush(InputStream *ist)
289 {
290  int i;
291 
292  if (ist->sub2video.end_pts < INT64_MAX)
293  sub2video_update(ist, NULL);
294  for (i = 0; i < ist->nb_filters; i++)
295  av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
296 }
297 
298 /* end of sub2video hack */
299 
300 static void term_exit_sigsafe(void)
301 {
302 #if HAVE_TERMIOS_H
303  if(restore_tty)
304  tcsetattr (0, TCSANOW, &oldtty);
305 #endif
306 }
307 
308 void term_exit(void)
309 {
310  av_log(NULL, AV_LOG_QUIET, "%s", "");
311  term_exit_sigsafe();
312 }
313 
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
319 
320 static void
321 sigterm_handler(int sig)
322 {
323  received_sigterm = sig;
324  received_nb_signals++;
325  term_exit_sigsafe();
326  if(received_nb_signals > 3) {
327  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328  strlen("Received > 3 system signals, hard exiting\n"));
329 
330  exit(123);
331  }
332 }
333 
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338 
339  switch (fdwCtrlType)
340  {
341  case CTRL_C_EVENT:
342  case CTRL_BREAK_EVENT:
343  sigterm_handler(SIGINT);
344  return TRUE;
345 
346  case CTRL_CLOSE_EVENT:
347  case CTRL_LOGOFF_EVENT:
348  case CTRL_SHUTDOWN_EVENT:
349  sigterm_handler(SIGTERM);
350  /* Basically, with these 3 events, when we return from this method the
351  process is hard terminated, so stall for as long as needed
352  to let the main thread(s) clean up and gracefully terminate
353  (we have at most 5 seconds, but should be done far before that). */
354  while (!ffmpeg_exited) {
355  Sleep(0);
356  }
357  return TRUE;
358 
359  default:
360  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361  return FALSE;
362  }
363 }
364 #endif
365 
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369  if(!run_as_daemon){
370  struct termios tty;
371  if (tcgetattr (0, &tty) == 0) {
372  oldtty = tty;
373  restore_tty = 1;
374 
375  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376  |INLCR|IGNCR|ICRNL|IXON);
377  tty.c_oflag |= OPOST;
378  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379  tty.c_cflag &= ~(CSIZE|PARENB);
380  tty.c_cflag |= CS8;
381  tty.c_cc[VMIN] = 1;
382  tty.c_cc[VTIME] = 0;
383 
384  tcsetattr (0, TCSANOW, &tty);
385  }
386  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387  }
388 #endif
389 
390  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393  signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399 
400 /* read a key without blocking */
401 static int read_key(void)
402 {
403  unsigned char ch;
404 #if HAVE_TERMIOS_H
405  int n = 1;
406  struct timeval tv;
407  fd_set rfds;
408 
409  FD_ZERO(&rfds);
410  FD_SET(0, &rfds);
411  tv.tv_sec = 0;
412  tv.tv_usec = 0;
413  n = select(1, &rfds, NULL, NULL, &tv);
414  if (n > 0) {
415  n = read(0, &ch, 1);
416  if (n == 1)
417  return ch;
418 
419  return n;
420  }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423  static int is_pipe;
424  static HANDLE input_handle;
425  DWORD dw, nchars;
426  if(!input_handle){
427  input_handle = GetStdHandle(STD_INPUT_HANDLE);
428  is_pipe = !GetConsoleMode(input_handle, &dw);
429  }
430 
431  if (is_pipe) {
432  /* When running under a GUI, you will end up here. */
433  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434  // input pipe may have been closed by the program that ran ffmpeg
435  return -1;
436  }
437  //Read it
438  if(nchars != 0) {
439  read(0, &ch, 1);
440  return ch;
441  }else{
442  return -1;
443  }
444  }
445 # endif
446  if(kbhit())
447  return(getch());
448 #endif
449  return -1;
450 }
451 
452 static int decode_interrupt_cb(void *ctx)
453 {
454  return received_nb_signals > transcode_init_done;
455 }
456 
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
458 
459 static void ffmpeg_cleanup(int ret)
460 {
461  int i, j;
462 
463  if (do_benchmark) {
464  int maxrss = getmaxrss() / 1024;
465  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466  }
467 
468  for (i = 0; i < nb_filtergraphs; i++) {
469  FilterGraph *fg = filtergraphs[i];
470  avfilter_graph_free(&fg->graph);
471  for (j = 0; j < fg->nb_inputs; j++) {
472  av_freep(&fg->inputs[j]->name);
473  av_freep(&fg->inputs[j]);
474  }
475  av_freep(&fg->inputs);
476  for (j = 0; j < fg->nb_outputs; j++) {
477  av_freep(&fg->outputs[j]->name);
478  av_freep(&fg->outputs[j]);
479  }
480  av_freep(&fg->outputs);
481  av_freep(&fg->graph_desc);
482 
483  av_freep(&filtergraphs[i]);
484  }
485  av_freep(&filtergraphs);
486 
488 
489  /* close files */
490  for (i = 0; i < nb_output_files; i++) {
491  OutputFile *of = output_files[i];
492  AVFormatContext *s;
493  if (!of)
494  continue;
495  s = of->ctx;
496  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497  avio_closep(&s->pb);
498  avformat_free_context(s);
499  av_dict_free(&of->opts);
500 
501  av_freep(&output_files[i]);
502  }
503  for (i = 0; i < nb_output_streams; i++) {
504  OutputStream *ost = output_streams[i];
505  AVBitStreamFilterContext *bsfc;
506 
507  if (!ost)
508  continue;
509 
510  bsfc = ost->bitstream_filters;
511  while (bsfc) {
512  AVBitStreamFilterContext *next = bsfc->next;
513  av_bitstream_filter_close(bsfc);
514  bsfc = next;
515  }
516  ost->bitstream_filters = NULL;
518  av_frame_free(&ost->last_frame);
519 
520  av_parser_close(ost->parser);
521 
522  av_freep(&ost->forced_keyframes);
524  av_freep(&ost->avfilter);
525  av_freep(&ost->logfile_prefix);
526 
528  ost->audio_channels_mapped = 0;
529 
530  av_dict_free(&ost->sws_dict);
531 
533 
534  av_freep(&output_streams[i]);
535  }
536 #if HAVE_PTHREADS
537  free_input_threads();
538 #endif
539  for (i = 0; i < nb_input_files; i++) {
540  avformat_close_input(&input_files[i]->ctx);
541  av_freep(&input_files[i]);
542  }
543  for (i = 0; i < nb_input_streams; i++) {
544  InputStream *ist = input_streams[i];
545 
548  av_dict_free(&ist->decoder_opts);
551  av_freep(&ist->filters);
552  av_freep(&ist->hwaccel_device);
553 
555 
556  av_freep(&input_streams[i]);
557  }
558 
559  if (vstats_file) {
560  if (fclose(vstats_file))
561  av_log(NULL, AV_LOG_ERROR,
562  "Error closing vstats file, loss of information possible: %s\n",
563  av_err2str(AVERROR(errno)));
564  }
566 
567  av_freep(&input_streams);
568  av_freep(&input_files);
569  av_freep(&output_streams);
570  av_freep(&output_files);
571 
572  uninit_opts();
573 
575 
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && transcode_init_done) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
583  ffmpeg_exited = 1;
584 }
585 
586 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
587 {
588  AVDictionaryEntry *t = NULL;
589 
590  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
592  }
593 }
594 
595 static void assert_avoptions(AVDictionary *m)
596 {
597  AVDictionaryEntry *t;
598  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600  exit_program(1);
601  }
602 }
603 
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606  exit_program(1);
607 }
608 
609 static void update_benchmark(const char *fmt, ...)
610 {
611  if (do_benchmark_all) {
612  int64_t t = getutime();
613  va_list va;
614  char buf[1024];
615 
616  if (fmt) {
617  va_start(va, fmt);
618  vsnprintf(buf, sizeof(buf), fmt, va);
619  va_end(va);
620  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621  }
622  current_time = t;
623  }
624 }
625 
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628  int i;
629  for (i = 0; i < nb_output_streams; i++) {
630  OutputStream *ost2 = output_streams[i];
631  ost2->finished |= ost == ost2 ? this_stream : others;
632  }
633 }
634 
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
636 {
637  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
639  int ret;
640 
641  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643  if (ost->st->codec->extradata) {
644  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
646  }
647  }
648 
649  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652 
653  /*
654  * Audio encoders may split the packets -- #frames in != #packets out.
655  * But there is no reordering, so we can limit the number of output packets
656  * by simply dropping them here.
657  * Counting encoded video frames needs to be done separately because of
658  * reordering, see do_video_out()
659  */
660  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661  if (ost->frame_number >= ost->max_frames) {
662  av_packet_unref(pkt);
663  return;
664  }
665  ost->frame_number++;
666  }
667  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668  int i;
669  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
670  NULL);
671  ost->quality = sd ? AV_RL32(sd) : -1;
672  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
673 
674  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
675  if (sd && i < sd[5])
676  ost->error[i] = AV_RL64(sd + 8 + 8*i);
677  else
678  ost->error[i] = -1;
679  }
680 
681  if (ost->frame_rate.num && ost->is_cfr) {
682  if (pkt->duration > 0)
683  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
685  ost->st->time_base);
686  }
687  }
688 
689  if (bsfc)
691 
692  if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693  print_error("", ret);
694  if (exit_on_error)
695  exit_program(1);
696  }
697  if (pkt->size == 0 && pkt->side_data_elems == 0)
698  return;
699  if (!ost->st->codecpar->extradata && avctx->extradata) {
700  ost->st->codecpar->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
701  if (!ost->st->codecpar->extradata) {
702  av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
703  exit_program(1);
704  }
705  ost->st->codecpar->extradata_size = avctx->extradata_size;
706  memcpy(ost->st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
707  }
708 
709  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
710  if (pkt->dts != AV_NOPTS_VALUE &&
711  pkt->pts != AV_NOPTS_VALUE &&
712  pkt->dts > pkt->pts) {
713  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
714  pkt->dts, pkt->pts,
715  ost->file_index, ost->st->index);
716  pkt->pts =
717  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
718  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
719  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
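 /* sum minus min minus max leaves the middle value, so the guessed timestamp
    is the median of pkt->pts, pkt->dts and last_mux_dts + 1. */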
720  }
721  if(
722  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
723  pkt->dts != AV_NOPTS_VALUE &&
724  !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
725  ost->last_mux_dts != AV_NOPTS_VALUE) {
726  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
727  if (pkt->dts < max) {
728  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
729  av_log(s, loglevel, "Non-monotonous DTS in output stream "
730  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
731  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
732  if (exit_on_error) {
733  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
734  exit_program(1);
735  }
736  av_log(s, loglevel, "changing to %"PRId64". This may result "
737  "in incorrect timestamps in the output file.\n",
738  max);
739  if(pkt->pts >= pkt->dts)
740  pkt->pts = FFMAX(pkt->pts, max);
741  pkt->dts = max;
742  }
743  }
744  }
745  ost->last_mux_dts = pkt->dts;
746 
747  ost->data_size += pkt->size;
748  ost->packets_written++;
749 
750  pkt->stream_index = ost->index;
751 
752  if (debug_ts) {
753  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
754  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
756  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
757  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
758  pkt->size
759  );
760  }
761 
762  ret = av_interleaved_write_frame(s, pkt);
763  if (ret < 0) {
764  print_error("av_interleaved_write_frame()", ret);
765  main_return_code = 1;
766  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
767  }
768  av_packet_unref(pkt);
769 }
770 
771 static void close_output_stream(OutputStream *ost)
772 {
773  OutputFile *of = output_files[ost->file_index];
774 
775  ost->finished |= ENCODER_FINISHED;
776  if (of->shortest) {
777  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
778  of->recording_time = FFMIN(of->recording_time, end);
779  }
780 }
781 
782 static int check_recording_time(OutputStream *ost)
783 {
784  OutputFile *of = output_files[ost->file_index];
785 
786  if (of->recording_time != INT64_MAX &&
787  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
788  AV_TIME_BASE_Q) >= 0) {
789  close_output_stream(ost);
790  return 0;
791  }
792  return 1;
793 }
794 
796  AVFrame *frame)
797 {
798  AVCodecContext *enc = ost->enc_ctx;
799  AVPacket pkt;
800  int got_packet = 0;
801 
802  av_init_packet(&pkt);
803  pkt.data = NULL;
804  pkt.size = 0;
805 
806  if (!check_recording_time(ost))
807  return;
808 
809  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
810  frame->pts = ost->sync_opts;
811  ost->sync_opts = frame->pts + frame->nb_samples;
812  ost->samples_encoded += frame->nb_samples;
813  ost->frames_encoded++;
814 
815  av_assert0(pkt.size || !pkt.data);
817  if (debug_ts) {
818  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
819  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
820  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
821  enc->time_base.num, enc->time_base.den);
822  }
823 
824  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
825  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
826  exit_program(1);
827  }
828  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
829 
830  if (got_packet) {
831  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
832 
833  if (debug_ts) {
834  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
835  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
836  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
837  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
838  }
839 
840  write_frame(s, &pkt, ost);
841  }
842 }
843 
844 static void do_subtitle_out(AVFormatContext *s,
845  OutputStream *ost,
846  InputStream *ist,
847  AVSubtitle *sub)
848 {
849  int subtitle_out_max_size = 1024 * 1024;
850  int subtitle_out_size, nb, i;
851  AVCodecContext *enc;
852  AVPacket pkt;
853  int64_t pts;
854 
855  if (sub->pts == AV_NOPTS_VALUE) {
856  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
857  if (exit_on_error)
858  exit_program(1);
859  return;
860  }
861 
862  enc = ost->enc_ctx;
863 
864  if (!subtitle_out) {
865  subtitle_out = av_malloc(subtitle_out_max_size);
866  if (!subtitle_out) {
867  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
868  exit_program(1);
869  }
870  }
871 
872  /* Note: DVB subtitles need one packet to draw them and another
873  packet to clear them */
874  /* XXX: signal it in the codec context ? */
875  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
876  nb = 2;
877  else
878  nb = 1;
879 
880  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
881  pts = sub->pts;
882  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
883  pts -= output_files[ost->file_index]->start_time;
884  for (i = 0; i < nb; i++) {
885  unsigned save_num_rects = sub->num_rects;
886 
887  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
888  if (!check_recording_time(ost))
889  return;
890 
891  sub->pts = pts;
892  // start_display_time is required to be 0
893  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
895  sub->start_display_time = 0;
896  if (i == 1)
897  sub->num_rects = 0;
898 
899  ost->frames_encoded++;
900 
901  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
902  subtitle_out_max_size, sub);
903  if (i == 1)
904  sub->num_rects = save_num_rects;
905  if (subtitle_out_size < 0) {
906  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
907  exit_program(1);
908  }
909 
910  av_init_packet(&pkt);
911  pkt.data = subtitle_out;
912  pkt.size = subtitle_out_size;
913  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
914  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
915  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
916  /* XXX: the pts correction is handled here. Maybe handling
917  it in the codec would be better */
918  if (i == 0)
919  pkt.pts += 90 * sub->start_display_time;
920  else
921  pkt.pts += 90 * sub->end_display_time;
922  }
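 /* The factor of 90 converts the display times, which are in milliseconds,
    into the 90 kHz clock used for DVB subtitle timestamps. */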
923  pkt.dts = pkt.pts;
924  write_frame(s, &pkt, ost);
925  }
926 }
927 
928 static void do_video_out(AVFormatContext *s,
929  OutputStream *ost,
930  AVFrame *next_picture,
931  double sync_ipts)
932 {
933  int ret, format_video_sync;
934  AVPacket pkt;
935  AVCodecContext *enc = ost->enc_ctx;
936  AVCodecContext *mux_enc = ost->st->codec;
937  int nb_frames, nb0_frames, i;
938  double delta, delta0;
939  double duration = 0;
940  int frame_size = 0;
941  InputStream *ist = NULL;
942  AVFilterContext *filter = ost->filter->filter;
943 
944  if (ost->source_index >= 0)
945  ist = input_streams[ost->source_index];
946 
947  if (filter->inputs[0]->frame_rate.num > 0 &&
948  filter->inputs[0]->frame_rate.den > 0)
949  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
950 
951  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
952  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
953 
954  if (!ost->filters_script &&
955  !ost->filters &&
956  next_picture &&
957  ist &&
958  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
959  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
960  }
961 
962  if (!next_picture) {
963  //end, flushing
964  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
965  ost->last_nb0_frames[1],
966  ost->last_nb0_frames[2]);
967  } else {
968  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
969  delta = delta0 + duration;
970 
971  /* by default, we output a single frame */
972  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
973  nb_frames = 1;
974 
975  format_video_sync = video_sync_method;
976  if (format_video_sync == VSYNC_AUTO) {
977  if(!strcmp(s->oformat->name, "avi")) {
978  format_video_sync = VSYNC_VFR;
979  } else
980  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
981  if ( ist
982  && format_video_sync == VSYNC_CFR
983  && input_files[ist->file_index]->ctx->nb_streams == 1
984  && input_files[ist->file_index]->input_ts_offset == 0) {
985  format_video_sync = VSYNC_VSCFR;
986  }
987  if (format_video_sync == VSYNC_CFR && copy_ts) {
988  format_video_sync = VSYNC_VSCFR;
989  }
990  }
991  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
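 /* CFR duplicates or drops frames to hold a constant output rate; VSCFR
    behaves like CFR but does not duplicate frames to fill the gap before the
    first input timestamp; VFR only adjusts timestamps; PASSTHROUGH and DROP
    forward frames as they come (DROP also discards the timestamps so the
    muxer regenerates them). */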
992 
993  if (delta0 < 0 &&
994  delta > 0 &&
995  format_video_sync != VSYNC_PASSTHROUGH &&
996  format_video_sync != VSYNC_DROP) {
997  if (delta0 < -0.6) {
998  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
999  } else
1000  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1001  sync_ipts = ost->sync_opts;
1002  duration += delta0;
1003  delta0 = 0;
1004  }
1005 
1006  switch (format_video_sync) {
1007  case VSYNC_VSCFR:
1008  if (ost->frame_number == 0 && delta0 >= 0.5) {
1009  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1010  delta = duration;
1011  delta0 = 0;
1012  ost->sync_opts = lrint(sync_ipts);
1013  }
1014  case VSYNC_CFR:
1015  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1016  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1017  nb_frames = 0;
1018  } else if (delta < -1.1)
1019  nb_frames = 0;
1020  else if (delta > 1.1) {
1021  nb_frames = lrintf(delta);
1022  if (delta0 > 1.1)
1023  nb0_frames = lrintf(delta0 - 0.6);
1024  }
1025  break;
1026  case VSYNC_VFR:
1027  if (delta <= -0.6)
1028  nb_frames = 0;
1029  else if (delta > 0.6)
1030  ost->sync_opts = lrint(sync_ipts);
1031  break;
1032  case VSYNC_DROP:
1033  case VSYNC_PASSTHROUGH:
1034  ost->sync_opts = lrint(sync_ipts);
1035  break;
1036  default:
1037  av_assert0(0);
1038  }
1039  }
1040 
1041  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1042  nb0_frames = FFMIN(nb0_frames, nb_frames);
1043 
1044  memmove(ost->last_nb0_frames + 1,
1045  ost->last_nb0_frames,
1046  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1047  ost->last_nb0_frames[0] = nb0_frames;
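 /* last_nb0_frames keeps a three-entry history of duplication counts;
    mid_pred() above takes its median when flushing to decide how many times
    to repeat the final frame. */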
1048 
1049  if (nb0_frames == 0 && ost->last_dropped) {
1050  nb_frames_drop++;
1051  av_log(NULL, AV_LOG_VERBOSE,
1052  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1053  ost->frame_number, ost->st->index, ost->last_frame->pts);
1054  }
1055  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1056  if (nb_frames > dts_error_threshold * 30) {
1057  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1058  nb_frames_drop++;
1059  return;
1060  }
1061  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1062  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1063  }
1064  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1065 
1066  /* duplicates frame if needed */
1067  for (i = 0; i < nb_frames; i++) {
1068  AVFrame *in_picture;
1069  av_init_packet(&pkt);
1070  pkt.data = NULL;
1071  pkt.size = 0;
1072 
1073  if (i < nb0_frames && ost->last_frame) {
1074  in_picture = ost->last_frame;
1075  } else
1076  in_picture = next_picture;
1077 
1078  if (!in_picture)
1079  return;
1080 
1081  in_picture->pts = ost->sync_opts;
1082 
1083 #if 1
1084  if (!check_recording_time(ost))
1085 #else
1086  if (ost->frame_number >= ost->max_frames)
1087 #endif
1088  return;
1089 
1090 #if FF_API_LAVF_FMT_RAWPICTURE
1091  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1092  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1093  /* raw pictures are written as an AVPicture structure to
1094  avoid any copies. We temporarily support the older
1095  method. */
1096  if (in_picture->interlaced_frame)
1097  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1098  else
1099  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1100  pkt.data = (uint8_t *)in_picture;
1101  pkt.size = sizeof(AVPicture);
1102  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1103  pkt.flags |= AV_PKT_FLAG_KEY;
1104 
1105  write_frame(s, &pkt, ost);
1106  } else
1107 #endif
1108  {
1109  int got_packet, forced_keyframe = 0;
1110  double pts_time;
1111 
1113  ost->top_field_first >= 0)
1114  in_picture->top_field_first = !!ost->top_field_first;
1115 
1116  if (in_picture->interlaced_frame) {
1117  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1118  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1119  else
1120  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1121  } else
1122  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1123 
1124  in_picture->quality = enc->global_quality;
1125  in_picture->pict_type = 0;
1126 
1127  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1128  in_picture->pts * av_q2d(enc->time_base) : NAN;
1129  if (ost->forced_kf_index < ost->forced_kf_count &&
1130  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1131  ost->forced_kf_index++;
1132  forced_keyframe = 1;
1133  } else if (ost->forced_keyframes_pexpr) {
1134  double res;
1135  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1138  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1144  res);
1145  if (res) {
1146  forced_keyframe = 1;
1152  }
1153 
1155  } else if ( ost->forced_keyframes
1156  && !strncmp(ost->forced_keyframes, "source", 6)
1157  && in_picture->key_frame==1) {
1158  forced_keyframe = 1;
1159  }
1160 
1161  if (forced_keyframe) {
1162  in_picture->pict_type = AV_PICTURE_TYPE_I;
1163  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1164  }
1165 
1167  if (debug_ts) {
1168  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1169  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1170  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1171  enc->time_base.num, enc->time_base.den);
1172  }
1173 
1174  ost->frames_encoded++;
1175 
1176  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1177  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1178  if (ret < 0) {
1179  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1180  exit_program(1);
1181  }
1182 
1183  if (got_packet) {
1184  if (debug_ts) {
1185  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1188  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1189  }
1190 
1191  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1192  pkt.pts = ost->sync_opts;
1193 
1194  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1195 
1196  if (debug_ts) {
1197  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1198  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1199  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1200  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1201  }
1202 
1203  frame_size = pkt.size;
1204  write_frame(s, &pkt, ost);
1205 
1206  /* if two pass, output log */
1207  if (ost->logfile && enc->stats_out) {
1208  fprintf(ost->logfile, "%s", enc->stats_out);
1209  }
1210  }
1211  }
1212  ost->sync_opts++;
1213  /*
1214  * For video, number of frames in == number of packets out.
1215  * But there may be reordering, so we can't throw away frames on encoder
1216  * flush; we need to limit them here, before they go into the encoder.
1217  */
1218  ost->frame_number++;
1219 
1220  if (vstats_filename && frame_size)
1221  do_video_stats(ost, frame_size);
1222  }
1223 
1224  if (!ost->last_frame)
1225  ost->last_frame = av_frame_alloc();
1226  av_frame_unref(ost->last_frame);
1227  if (next_picture && ost->last_frame)
1228  av_frame_ref(ost->last_frame, next_picture);
1229  else
1230  av_frame_free(&ost->last_frame);
1231 }
1232 
1233 static double psnr(double d)
1234 {
1235  return -10.0 * log10(d);
1236 }
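/* Callers pass a normalized mean squared error (sum of squared differences
   divided by width * height * 255^2), so for example psnr(1e-4) = 40.0 dB. */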
1237 
1238 static void do_video_stats(OutputStream *ost, int frame_size)
1239 {
1240  AVCodecContext *enc;
1241  int frame_number;
1242  double ti1, bitrate, avg_bitrate;
1243 
1244  /* this is executed just the first time do_video_stats is called */
1245  if (!vstats_file) {
1246  vstats_file = fopen(vstats_filename, "w");
1247  if (!vstats_file) {
1248  perror("fopen");
1249  exit_program(1);
1250  }
1251  }
1252 
1253  enc = ost->enc_ctx;
1254  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1255  frame_number = ost->st->nb_frames;
1256  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1257  ost->quality / (float)FF_QP2LAMBDA);
1258 
1259  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1260  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1261 
1262  fprintf(vstats_file,"f_size= %6d ", frame_size);
1263  /* compute pts value */
1264  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1265  if (ti1 < 0.01)
1266  ti1 = 0.01;
1267 
1268  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1269  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1270  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1271  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1272  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1273  }
1274 }
1275 
1276 static void finish_output_stream(OutputStream *ost)
1277 {
1278  OutputFile *of = output_files[ost->file_index];
1279  int i;
1280 
1281  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1282 
1283  if (of->shortest) {
1284  for (i = 0; i < of->ctx->nb_streams; i++)
1285  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1286  }
1287 }
1288 
1289 /**
1290  * Get and encode new output from any of the filtergraphs, without causing
1291  * activity.
1292  *
1293  * @return 0 for success, <0 for severe errors
1294  */
1295 static int reap_filters(int flush)
1296 {
1297  AVFrame *filtered_frame = NULL;
1298  int i;
1299 
1300  /* Reap all buffers present in the buffer sinks */
1301  for (i = 0; i < nb_output_streams; i++) {
1302  OutputStream *ost = output_streams[i];
1303  OutputFile *of = output_files[ost->file_index];
1304  AVFilterContext *filter;
1305  AVCodecContext *enc = ost->enc_ctx;
1306  int ret = 0;
1307 
1308  if (!ost->filter)
1309  continue;
1310  filter = ost->filter->filter;
1311 
1312  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1313  return AVERROR(ENOMEM);
1314  }
1315  filtered_frame = ost->filtered_frame;
1316 
1317  while (1) {
1318  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1319  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1320  AV_BUFFERSINK_FLAG_NO_REQUEST);
1321  if (ret < 0) {
1322  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1323  av_log(NULL, AV_LOG_WARNING,
1324  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1325  } else if (flush && ret == AVERROR_EOF) {
1326  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1327  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1328  }
1329  break;
1330  }
1331  if (ost->finished) {
1332  av_frame_unref(filtered_frame);
1333  continue;
1334  }
1335  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1336  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1337  AVRational tb = enc->time_base;
1338  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1339 
1340  tb.den <<= extra_bits;
1341  float_pts =
1342  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1343  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1344  float_pts /= 1 << extra_bits;
1345  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
1346  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
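 /* Scaling the timebase denominator up by as much as 2^16 before rescaling
    lets float_pts keep fractional precision that the integer rescale into
    enc->time_base below would otherwise round away. */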
1347 
1348  filtered_frame->pts =
1349  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1350  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1351  }
1352  //if (ost->source_index >= 0)
1353  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1354 
1355  switch (filter->inputs[0]->type) {
1356  case AVMEDIA_TYPE_VIDEO:
1357  if (!ost->frame_aspect_ratio.num)
1358  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1359 
1360  if (debug_ts) {
1361  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1362  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1363  float_pts,
1364  enc->time_base.num, enc->time_base.den);
1365  }
1366 
1367  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1368  break;
1369  case AVMEDIA_TYPE_AUDIO:
1370  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1371  enc->channels != av_frame_get_channels(filtered_frame)) {
1372  av_log(NULL, AV_LOG_ERROR,
1373  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1374  break;
1375  }
1376  do_audio_out(of->ctx, ost, filtered_frame);
1377  break;
1378  default:
1379  // TODO support subtitle filters
1380  av_assert0(0);
1381  }
1382 
1383  av_frame_unref(filtered_frame);
1384  }
1385  }
1386 
1387  return 0;
1388 }
1389 
1390 static void print_final_stats(int64_t total_size)
1391 {
1392  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1393  uint64_t subtitle_size = 0;
1394  uint64_t data_size = 0;
1395  float percent = -1.0;
1396  int i, j;
1397  int pass1_used = 1;
1398 
1399  for (i = 0; i < nb_output_streams; i++) {
1400  OutputStream *ost = output_streams[i];
1401  switch (ost->enc_ctx->codec_type) {
1402  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1403  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1404  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1405  default: other_size += ost->data_size; break;
1406  }
1407  extra_size += ost->enc_ctx->extradata_size;
1408  data_size += ost->data_size;
1411  pass1_used = 0;
1412  }
1413 
1414  if (data_size && total_size>0 && total_size >= data_size)
1415  percent = 100.0 * (total_size - data_size) / data_size;
1416 
1417  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1418  video_size / 1024.0,
1419  audio_size / 1024.0,
1420  subtitle_size / 1024.0,
1421  other_size / 1024.0,
1422  extra_size / 1024.0);
1423  if (percent >= 0.0)
1424  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1425  else
1426  av_log(NULL, AV_LOG_INFO, "unknown");
1427  av_log(NULL, AV_LOG_INFO, "\n");
1428 
1429  /* print verbose per-stream stats */
1430  for (i = 0; i < nb_input_files; i++) {
1431  InputFile *f = input_files[i];
1432  uint64_t total_packets = 0, total_size = 0;
1433 
1434  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1435  i, f->ctx->filename);
1436 
1437  for (j = 0; j < f->nb_streams; j++) {
1438  InputStream *ist = input_streams[f->ist_index + j];
1439  enum AVMediaType type = ist->dec_ctx->codec_type;
1440 
1441  total_size += ist->data_size;
1442  total_packets += ist->nb_packets;
1443 
1444  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1445  i, j, media_type_string(type));
1446  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1447  ist->nb_packets, ist->data_size);
1448 
1449  if (ist->decoding_needed) {
1450  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1451  ist->frames_decoded);
1452  if (type == AVMEDIA_TYPE_AUDIO)
1453  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1454  av_log(NULL, AV_LOG_VERBOSE, "; ");
1455  }
1456 
1457  av_log(NULL, AV_LOG_VERBOSE, "\n");
1458  }
1459 
1460  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1461  total_packets, total_size);
1462  }
1463 
1464  for (i = 0; i < nb_output_files; i++) {
1465  OutputFile *of = output_files[i];
1466  uint64_t total_packets = 0, total_size = 0;
1467 
1468  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1469  i, of->ctx->filename);
1470 
1471  for (j = 0; j < of->ctx->nb_streams; j++) {
1472  OutputStream *ost = output_streams[of->ost_index + j];
1473  enum AVMediaType type = ost->enc_ctx->codec_type;
1474 
1475  total_size += ost->data_size;
1476  total_packets += ost->packets_written;
1477 
1478  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1479  i, j, media_type_string(type));
1480  if (ost->encoding_needed) {
1481  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1482  ost->frames_encoded);
1483  if (type == AVMEDIA_TYPE_AUDIO)
1484  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1485  av_log(NULL, AV_LOG_VERBOSE, "; ");
1486  }
1487 
1488  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1489  ost->packets_written, ost->data_size);
1490 
1491  av_log(NULL, AV_LOG_VERBOSE, "\n");
1492  }
1493 
1494  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1495  total_packets, total_size);
1496  }
1497  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1498  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1499  if (pass1_used) {
1500  av_log(NULL, AV_LOG_WARNING, "\n");
1501  } else {
1502  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1503  }
1504  }
1505 }
1506 
1507 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1508 {
1509  char buf[1024];
1510  AVBPrint buf_script;
1511  OutputStream *ost;
1512  AVFormatContext *oc;
1513  int64_t total_size;
1514  AVCodecContext *enc;
1515  int frame_number, vid, i;
1516  double bitrate;
1517  double speed;
1518  int64_t pts = INT64_MIN + 1;
1519  static int64_t last_time = -1;
1520  static int qp_histogram[52];
1521  int hours, mins, secs, us;
1522  int ret;
1523  float t;
1524 
1525  if (!print_stats && !is_last_report && !progress_avio)
1526  return;
1527 
1528  if (!is_last_report) {
1529  if (last_time == -1) {
1530  last_time = cur_time;
1531  return;
1532  }
1533  if ((cur_time - last_time) < 500000)
1534  return;
1535  last_time = cur_time;
1536  }
1537 
1538  t = (cur_time-timer_start) / 1000000.0;
1539 
1540 
1541  oc = output_files[0]->ctx;
1542 
1543  total_size = avio_size(oc->pb);
1544  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1545  total_size = avio_tell(oc->pb);
1546 
1547  buf[0] = '\0';
1548  vid = 0;
1549  av_bprint_init(&buf_script, 0, 1);
1550  for (i = 0; i < nb_output_streams; i++) {
1551  float q = -1;
1552  ost = output_streams[i];
1553  enc = ost->enc_ctx;
1554  if (!ost->stream_copy)
1555  q = ost->quality / (float) FF_QP2LAMBDA;
1556 
1557  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1558  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1559  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560  ost->file_index, ost->index, q);
1561  }
1562  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1563  float fps;
1564 
1565  frame_number = ost->frame_number;
1566  fps = t > 1 ? frame_number / t : 0;
1567  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1568  frame_number, fps < 9.95, fps, q);
1569  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1570  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1571  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1572  ost->file_index, ost->index, q);
1573  if (is_last_report)
1574  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1575  if (qp_hist) {
1576  int j;
1577  int qp = lrintf(q);
1578  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1579  qp_histogram[qp]++;
1580  for (j = 0; j < 32; j++)
1581  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1582  }
1583 
1584  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1585  int j;
1586  double error, error_sum = 0;
1587  double scale, scale_sum = 0;
1588  double p;
1589  char type[3] = { 'Y','U','V' };
1590  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1591  for (j = 0; j < 3; j++) {
1592  if (is_last_report) {
1593  error = enc->error[j];
1594  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1595  } else {
1596  error = ost->error[j];
1597  scale = enc->width * enc->height * 255.0 * 255.0;
1598  }
1599  if (j)
1600  scale /= 4;
1601  error_sum += error;
1602  scale_sum += scale;
1603  p = psnr(error / scale);
1604  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1605  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1606  ost->file_index, ost->index, type[j] | 32, p);
1607  }
1608  p = psnr(error_sum / scale_sum);
1609  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1610  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1611  ost->file_index, ost->index, p);
1612  }
1613  vid = 1;
1614  }
1615  /* compute min output value */
1616  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1617  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1618  ost->st->time_base, AV_TIME_BASE_Q));
1619  if (is_last_report)
1620  nb_frames_drop += ost->last_dropped;
1621  }
1622 
1623  secs = FFABS(pts) / AV_TIME_BASE;
1624  us = FFABS(pts) % AV_TIME_BASE;
1625  mins = secs / 60;
1626  secs %= 60;
1627  hours = mins / 60;
1628  mins %= 60;
1629 
1630  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1631  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
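 /* total_size is in bytes and pts in microseconds, so the bitrate expression
    yields kbit/s; speed is produced output time divided by elapsed wall-clock
    time (e.g. 2x means encoding runs twice as fast as realtime). */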
1632 
1633  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1634  "size=N/A time=");
1635  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1636  "size=%8.0fkB time=", total_size / 1024.0);
1637  if (pts < 0)
1638  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1639  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1640  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1641  (100 * us) / AV_TIME_BASE);
1642 
1643  if (bitrate < 0) {
1644  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1645  av_bprintf(&buf_script, "bitrate=N/A\n");
1646  }else{
1647  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1648  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1649  }
1650 
1651  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1652  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1653  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1654  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1655  hours, mins, secs, us);
1656 
1657  if (nb_frames_dup || nb_frames_drop)
1658  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1659  nb_frames_dup, nb_frames_drop);
1660  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1661  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1662 
1663  if (speed < 0) {
1664  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1665  av_bprintf(&buf_script, "speed=N/A\n");
1666  } else {
1667  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1668  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1669  }
1670 
1671  if (print_stats || is_last_report) {
1672  const char end = is_last_report ? '\n' : '\r';
1673  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1674  fprintf(stderr, "%s %c", buf, end);
1675  } else
1676  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1677 
1678  fflush(stderr);
1679  }
1680 
1681  if (progress_avio) {
1682  av_bprintf(&buf_script, "progress=%s\n",
1683  is_last_report ? "end" : "continue");
1684  avio_write(progress_avio, buf_script.str,
1685  FFMIN(buf_script.len, buf_script.size - 1));
1686  avio_flush(progress_avio);
1687  av_bprint_finalize(&buf_script, NULL);
1688  if (is_last_report) {
1689  if ((ret = avio_closep(&progress_avio)) < 0)
1690  av_log(NULL, AV_LOG_ERROR,
1691  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1692  }
1693  }
1694 
1695  if (is_last_report)
1696  print_final_stats(total_size);
1697 }
1698 
1699 static void flush_encoders(void)
1700 {
1701  int i, ret;
1702 
1703  for (i = 0; i < nb_output_streams; i++) {
1704  OutputStream *ost = output_streams[i];
1705  AVCodecContext *enc = ost->enc_ctx;
1706  AVFormatContext *os = output_files[ost->file_index]->ctx;
1707  int stop_encoding = 0;
1708 
1709  if (!ost->encoding_needed)
1710  continue;
1711 
1712  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1713  continue;
1714 #if FF_API_LAVF_FMT_RAWPICTURE
1715  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1716  continue;
1717 #endif
1718 
1719  for (;;) {
1720  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1721  const char *desc;
1722 
1723  switch (enc->codec_type) {
1724  case AVMEDIA_TYPE_AUDIO:
1725  encode = avcodec_encode_audio2;
1726  desc = "audio";
1727  break;
1728  case AVMEDIA_TYPE_VIDEO:
1729  encode = avcodec_encode_video2;
1730  desc = "video";
1731  break;
1732  default:
1733  stop_encoding = 1;
1734  }
1735 
1736  if (encode) {
1737  AVPacket pkt;
1738  int pkt_size;
1739  int got_packet;
1740  av_init_packet(&pkt);
1741  pkt.data = NULL;
1742  pkt.size = 0;
1743 
1745  ret = encode(enc, &pkt, NULL, &got_packet);
1746  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1747  if (ret < 0) {
1748  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1749  desc,
1750  av_err2str(ret));
1751  exit_program(1);
1752  }
1753  if (ost->logfile && enc->stats_out) {
1754  fprintf(ost->logfile, "%s", enc->stats_out);
1755  }
1756  if (!got_packet) {
1757  stop_encoding = 1;
1758  break;
1759  }
1760  if (ost->finished & MUXER_FINISHED) {
1761  av_packet_unref(&pkt);
1762  continue;
1763  }
1764  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1765  pkt_size = pkt.size;
1766  write_frame(os, &pkt, ost);
1767  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1768  do_video_stats(ost, pkt_size);
1769  }
1770  }
1771 
1772  if (stop_encoding)
1773  break;
1774  }
1775  }
1776 }
1777 
1778 /*
1779  * Check whether a packet from ist should be written into ost at this time
1780  */
1781 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1782 {
1783  OutputFile *of = output_files[ost->file_index];
1784  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1785 
1786  if (ost->source_index != ist_index)
1787  return 0;
1788 
1789  if (ost->finished)
1790  return 0;
1791 
1792  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1793  return 0;
1794 
1795  return 1;
1796 }
1797 
1798 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1799 {
1800  OutputFile *of = output_files[ost->file_index];
1801  InputFile *f = input_files [ist->file_index];
1802  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1803  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1804  AVPicture pict;
1805  AVPacket opkt;
1806 
1807  av_init_packet(&opkt);
1808 
1809  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1810  !ost->copy_initial_nonkeyframes)
1811  return;
1812 
1813  if (!ost->frame_number && !ost->copy_prior_start) {
1814  int64_t comp_start = start_time;
1815  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1816  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1817  if (pkt->pts == AV_NOPTS_VALUE ?
1818  ist->pts < comp_start :
1819  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1820  return;
1821  }
1822 
1823  if (of->recording_time != INT64_MAX &&
1824  ist->pts >= of->recording_time + start_time) {
1825  close_output_stream(ost);
1826  return;
1827  }
1828 
1829  if (f->recording_time != INT64_MAX) {
1830  start_time = f->ctx->start_time;
1831  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1832  start_time += f->start_time;
1833  if (ist->pts >= f->recording_time + start_time) {
1834  close_output_stream(ost);
1835  return;
1836  }
1837  }
1838 
1839  /* force the input stream PTS */
1840  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1841  ost->sync_opts++;
1842 
1843  if (pkt->pts != AV_NOPTS_VALUE)
1844  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1845  else
1846  opkt.pts = AV_NOPTS_VALUE;
1847 
1848  if (pkt->dts == AV_NOPTS_VALUE)
1849  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1850  else
1851  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1852  opkt.dts -= ost_tb_start_time;
1853 
1854  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1855  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1856  if(!duration)
1857  duration = ist->dec_ctx->frame_size;
1858  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1859  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1860  ost->st->time_base) - ost_tb_start_time;
1861  }
1862 
1863  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1864  opkt.flags = pkt->flags;
1865  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1866  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1867  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1868  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1869  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1870  ) {
1871  int ret = av_parser_change(ost->parser, ost->st->codec,
1872  &opkt.data, &opkt.size,
1873  pkt->data, pkt->size,
1874  pkt->flags & AV_PKT_FLAG_KEY);
1875  if (ret < 0) {
1876  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1877  av_err2str(ret));
1878  exit_program(1);
1879  }
1880  if (ret) {
1881  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1882  if (!opkt.buf)
1883  exit_program(1);
1884  }
1885  } else {
1886  opkt.data = pkt->data;
1887  opkt.size = pkt->size;
1888  }
1889  av_copy_packet_side_data(&opkt, pkt);
1890 
1891 #if FF_API_LAVF_FMT_RAWPICTURE
1892  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1893  ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1894  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1895  /* store AVPicture in AVPacket, as expected by the output format */
1896  int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1897  if (ret < 0) {
1898  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1899  av_err2str(ret));
1900  exit_program(1);
1901  }
1902  opkt.data = (uint8_t *)&pict;
1903  opkt.size = sizeof(AVPicture);
1904  opkt.flags |= AV_PKT_FLAG_KEY;
1905  }
1906 #endif
1907 
1908  write_frame(of->ctx, &opkt, ost);
1909 }
1910 
1911 int guess_input_channel_layout(InputStream *ist)
1912 {
1913  AVCodecContext *dec = ist->dec_ctx;
1914 
1915  if (!dec->channel_layout) {
1916  char layout_name[256];
1917 
1918  if (dec->channels > ist->guess_layout_max)
1919  return 0;
1920  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1921  if (!dec->channel_layout)
1922  return 0;
1923  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1924  dec->channels, dec->channel_layout);
1925  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1926  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1927  }
1928  return 1;
1929 }
1930 
1931 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1932 {
1933  if (*got_output || ret<0)
1934  decode_error_stat[ret<0] ++;
1935 
1936  if (ret < 0 && exit_on_error)
1937  exit_program(1);
1938 
1939  if (exit_on_error && *got_output && ist) {
1940  if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1941  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1942  exit_program(1);
1943  }
1944  }
1945 }
1946 
1947 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1948 {
1949  AVFrame *decoded_frame, *f;
1950  AVCodecContext *avctx = ist->dec_ctx;
1951  int i, ret, err = 0, resample_changed;
1952  AVRational decoded_frame_tb;
1953 
1954  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1955  return AVERROR(ENOMEM);
1956  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1957  return AVERROR(ENOMEM);
1958  decoded_frame = ist->decoded_frame;
1959 
1960  update_benchmark(NULL);
1961  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1962  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1963 
1964  if (ret >= 0 && avctx->sample_rate <= 0) {
1965  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1966  ret = AVERROR_INVALIDDATA;
1967  }
1968 
1969  check_decode_result(ist, got_output, ret);
1970 
1971  if (!*got_output || ret < 0)
1972  return ret;
1973 
1974  ist->samples_decoded += decoded_frame->nb_samples;
1975  ist->frames_decoded++;
1976 
1977 #if 1
1978  /* increment next_dts to use for the case where the input stream does not
1979  have timestamps or there are multiple frames in the packet */
1980  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1981  avctx->sample_rate;
1982  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1983  avctx->sample_rate;
1984 #endif
1985 
1986  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1987  ist->resample_channels != avctx->channels ||
1988  ist->resample_channel_layout != decoded_frame->channel_layout ||
1989  ist->resample_sample_rate != decoded_frame->sample_rate;
1990  if (resample_changed) {
1991  char layout1[64], layout2[64];
1992 
1993  if (!guess_input_channel_layout(ist)) {
1994  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1995  "layout for Input Stream #%d.%d\n", ist->file_index,
1996  ist->st->index);
1997  exit_program(1);
1998  }
1999  decoded_frame->channel_layout = avctx->channel_layout;
2000 
2001  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2002  ist->resample_channel_layout);
2003  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2004  decoded_frame->channel_layout);
2005 
2006  av_log(NULL, AV_LOG_INFO,
2007  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2008  ist->file_index, ist->st->index,
2009  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2010  ist->resample_channels, layout1,
2011  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2012  avctx->channels, layout2);
2013 
2014  ist->resample_sample_fmt = decoded_frame->format;
2015  ist->resample_sample_rate = decoded_frame->sample_rate;
2016  ist->resample_channel_layout = decoded_frame->channel_layout;
2017  ist->resample_channels = avctx->channels;
2018 
2019  for (i = 0; i < nb_filtergraphs; i++)
2020  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2021  FilterGraph *fg = filtergraphs[i];
2022  if (configure_filtergraph(fg) < 0) {
2023  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2024  exit_program(1);
2025  }
2026  }
2027  }
2028 
2029  /* if the decoder provides a pts, use it instead of the last packet pts.
2030  the decoder could be delaying output by a packet or more. */
2031  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2032  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2033  decoded_frame_tb = avctx->time_base;
2034  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2035  decoded_frame->pts = decoded_frame->pkt_pts;
2036  decoded_frame_tb = ist->st->time_base;
2037  } else if (pkt->pts != AV_NOPTS_VALUE) {
2038  decoded_frame->pts = pkt->pts;
2039  decoded_frame_tb = ist->st->time_base;
2040  }else {
2041  decoded_frame->pts = ist->dts;
2042  decoded_frame_tb = AV_TIME_BASE_Q;
2043  }
2044  pkt->pts = AV_NOPTS_VALUE;
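 /* convert the frame pts to a 1/sample_rate time base; av_rescale_delta()
  * keeps the rounding remainder across calls so no drift accumulates */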
2045  if (decoded_frame->pts != AV_NOPTS_VALUE)
2046  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2047  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2048  (AVRational){1, avctx->sample_rate});
2049  ist->nb_samples = decoded_frame->nb_samples;
2050  for (i = 0; i < ist->nb_filters; i++) {
2051  if (i < ist->nb_filters - 1) {
2052  f = ist->filter_frame;
2053  err = av_frame_ref(f, decoded_frame);
2054  if (err < 0)
2055  break;
2056  } else
2057  f = decoded_frame;
2058  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2059  AV_BUFFERSRC_FLAG_PUSH);
2060  if (err == AVERROR_EOF)
2061  err = 0; /* ignore */
2062  if (err < 0)
2063  break;
2064  }
2065  decoded_frame->pts = AV_NOPTS_VALUE;
2066 
2067  av_frame_unref(ist->filter_frame);
2068  av_frame_unref(decoded_frame);
2069  return err < 0 ? err : ret;
2070 }
2071 
2072 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2073 {
2074  AVFrame *decoded_frame, *f;
2075  int i, ret = 0, err = 0, resample_changed;
2076  int64_t best_effort_timestamp;
2077  AVRational *frame_sample_aspect;
2078 
2079  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2080  return AVERROR(ENOMEM);
2081  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2082  return AVERROR(ENOMEM);
2083  decoded_frame = ist->decoded_frame;
2084  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2085 
2086  update_benchmark(NULL);
2087  ret = avcodec_decode_video2(ist->dec_ctx,
2088  decoded_frame, got_output, pkt);
2089  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2090 
2091  // The following line may be required in some cases where there is no parser
2092  // or the parser does not set has_b_frames correctly
2093  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2094  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2095  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2096  } else
2097  av_log(ist->dec_ctx, AV_LOG_WARNING,
2098  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2099  "If you want to help, upload a sample "
2100  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2101  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2102  ist->dec_ctx->has_b_frames,
2103  ist->st->codec->has_b_frames);
2104  }
2105 
2106  check_decode_result(ist, got_output, ret);
2107 
2108  if (*got_output && ret >= 0) {
2109  if (ist->dec_ctx->width != decoded_frame->width ||
2110  ist->dec_ctx->height != decoded_frame->height ||
2111  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2112  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2113  decoded_frame->width,
2114  decoded_frame->height,
2115  decoded_frame->format,
2116  ist->dec_ctx->width,
2117  ist->dec_ctx->height,
2118  ist->dec_ctx->pix_fmt);
2119  }
2120  }
2121 
2122  if (!*got_output || ret < 0)
2123  return ret;
2124 
2125  if(ist->top_field_first>=0)
2126  decoded_frame->top_field_first = ist->top_field_first;
2127 
2128  ist->frames_decoded++;
2129 
2130  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2131  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2132  if (err < 0)
2133  goto fail;
2134  }
2135  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2136 
2137  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2138  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2139  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2140 
2141  if (ts != AV_NOPTS_VALUE)
2142  ist->next_pts = ist->pts = ts;
2143  }
2144 
2145  if (debug_ts) {
2146  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2147  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2148  ist->st->index, av_ts2str(decoded_frame->pts),
2149  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2150  best_effort_timestamp,
2151  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2152  decoded_frame->key_frame, decoded_frame->pict_type,
2153  ist->st->time_base.num, ist->st->time_base.den);
2154  }
2155 
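 /* a video packet is always treated as fully consumed by the decoder,
  * so clear the remaining size before returning to the decode loop */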
2156  pkt->size = 0;
2157 
2158  if (ist->st->sample_aspect_ratio.num)
2159  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2160 
2161  resample_changed = ist->resample_width != decoded_frame->width ||
2162  ist->resample_height != decoded_frame->height ||
2163  ist->resample_pix_fmt != decoded_frame->format;
2164  if (resample_changed) {
2165  av_log(NULL, AV_LOG_INFO,
2166  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2167  ist->file_index, ist->st->index,
2168  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2169  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2170 
2171  ist->resample_width = decoded_frame->width;
2172  ist->resample_height = decoded_frame->height;
2173  ist->resample_pix_fmt = decoded_frame->format;
2174 
2175  for (i = 0; i < nb_filtergraphs; i++) {
2176  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2177  configure_filtergraph(filtergraphs[i]) < 0) {
2178  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2179  exit_program(1);
2180  }
2181  }
2182  }
2183 
2184  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2185  for (i = 0; i < ist->nb_filters; i++) {
2186  if (!frame_sample_aspect->num)
2187  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2188 
2189  if (i < ist->nb_filters - 1) {
2190  f = ist->filter_frame;
2191  err = av_frame_ref(f, decoded_frame);
2192  if (err < 0)
2193  break;
2194  } else
2195  f = decoded_frame;
2196  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2197  if (ret == AVERROR_EOF) {
2198  ret = 0; /* ignore */
2199  } else if (ret < 0) {
2200  av_log(NULL, AV_LOG_FATAL,
2201  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2202  exit_program(1);
2203  }
2204  }
2205 
2206 fail:
2207  av_frame_unref(ist->filter_frame);
2208  av_frame_unref(decoded_frame);
2209  return err < 0 ? err : ret;
2210 }
2211 
2212 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2213 {
2214  AVSubtitle subtitle;
2215  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2216  &subtitle, got_output, pkt);
2217 
2218  check_decode_result(NULL, got_output, ret);
2219 
2220  if (ret < 0 || !*got_output) {
2221  if (!pkt->size)
2222  sub2video_flush(ist);
2223  return ret;
2224  }
2225 
2226  if (ist->fix_sub_duration) {
2227  int end = 1;
2228  if (ist->prev_sub.got_output) {
2229  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2230  1000, AV_TIME_BASE);
2231  if (end < ist->prev_sub.subtitle.end_display_time) {
2232  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2233  "Subtitle duration reduced from %d to %d%s\n",
2234  ist->prev_sub.subtitle.end_display_time, end,
2235  end <= 0 ? ", dropping it" : "");
2236  ist->prev_sub.subtitle.end_display_time = end;
2237  }
2238  }
2239  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2240  FFSWAP(int, ret, ist->prev_sub.ret);
2241  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2242  if (end <= 0)
2243  goto out;
2244  }
2245 
2246  if (!*got_output)
2247  return ret;
2248 
2249  sub2video_update(ist, &subtitle);
2250 
2251  if (!subtitle.num_rects)
2252  goto out;
2253 
2254  ist->frames_decoded++;
2255 
2256  for (i = 0; i < nb_output_streams; i++) {
2257  OutputStream *ost = output_streams[i];
2258 
2259  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2260  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2261  continue;
2262 
2263  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2264  }
2265 
2266 out:
2267  avsubtitle_free(&subtitle);
2268  return ret;
2269 }
2270 
2271 static int send_filter_eof(InputStream *ist)
2272 {
2273  int i, ret;
2274  for (i = 0; i < ist->nb_filters; i++) {
2275  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2276  if (ret < 0)
2277  return ret;
2278  }
2279  return 0;
2280 }
2281 
2282 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2283 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2284 {
2285  int ret = 0, i;
2286  int got_output = 0;
2287 
2288  AVPacket avpkt;
2289  if (!ist->saw_first_ts) {
2290  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291  ist->pts = 0;
2292  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2293  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2294  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2295  }
2296  ist->saw_first_ts = 1;
2297  }
2298 
2299  if (ist->next_dts == AV_NOPTS_VALUE)
2300  ist->next_dts = ist->dts;
2301  if (ist->next_pts == AV_NOPTS_VALUE)
2302  ist->next_pts = ist->pts;
2303 
2304  if (!pkt) {
2305  /* EOF handling */
2306  av_init_packet(&avpkt);
2307  avpkt.data = NULL;
2308  avpkt.size = 0;
2309  goto handle_eof;
2310  } else {
2311  avpkt = *pkt;
2312  }
2313 
2314  if (pkt->dts != AV_NOPTS_VALUE) {
2315  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2316  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2317  ist->next_pts = ist->pts = ist->dts;
2318  }
2319 
2320  // while we have more to decode or while the decoder did output something on EOF
2321  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2322  int duration;
2323  handle_eof:
2324 
2325  ist->pts = ist->next_pts;
2326  ist->dts = ist->next_dts;
2327 
2328  if (avpkt.size && avpkt.size != pkt->size &&
2329  !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES) && !ist->showed_multi_packet_warning) {
2330  av_log(NULL, AV_LOG_WARNING,
2331  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2332  ist->showed_multi_packet_warning = 1;
2333  }
2334 
2335  switch (ist->dec_ctx->codec_type) {
2336  case AVMEDIA_TYPE_AUDIO:
2337  ret = decode_audio (ist, &avpkt, &got_output);
2338  break;
2339  case AVMEDIA_TYPE_VIDEO:
2340  ret = decode_video (ist, &avpkt, &got_output);
2341  if (avpkt.duration) {
2342  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2343  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2344  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2345  duration = ((int64_t)AV_TIME_BASE *
2346  ist->dec_ctx->framerate.den * ticks) /
2347  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2348  } else
2349  duration = 0;
2350 
2351  if(ist->dts != AV_NOPTS_VALUE && duration) {
2352  ist->next_dts += duration;
2353  }else
2354  ist->next_dts = AV_NOPTS_VALUE;
2355 
2356  if (got_output)
2357  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2358  break;
2359  case AVMEDIA_TYPE_SUBTITLE:
2360  ret = transcode_subtitles(ist, &avpkt, &got_output);
2361  break;
2362  default:
2363  return -1;
2364  }
2365 
2366  if (ret < 0) {
2367  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2368  ist->file_index, ist->st->index, av_err2str(ret));
2369  if (exit_on_error)
2370  exit_program(1);
2371  break;
2372  }
2373 
2374  avpkt.dts=
2375  avpkt.pts= AV_NOPTS_VALUE;
2376 
2377  // touch data and size only if not EOF
2378  if (pkt) {
2379  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2380  ret = avpkt.size;
2381  avpkt.data += ret;
2382  avpkt.size -= ret;
2383  }
2384  if (!got_output) {
2385  continue;
2386  }
2387  if (got_output && !pkt)
2388  break;
2389  }
2390 
2391  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2392  /* except when looping we need to flush but not to send an EOF */
2393  if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2394  int ret = send_filter_eof(ist);
2395  if (ret < 0) {
2396  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2397  exit_program(1);
2398  }
2399  }
2400 
2401  /* handle stream copy */
2402  if (!ist->decoding_needed) {
2403  ist->dts = ist->next_dts;
2404  switch (ist->dec_ctx->codec_type) {
2405  case AVMEDIA_TYPE_AUDIO:
2406  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2407  ist->dec_ctx->sample_rate;
2408  break;
2409  case AVMEDIA_TYPE_VIDEO:
2410  if (ist->framerate.num) {
2411  // TODO: Remove work-around for c99-to-c89 issue 7
2412  AVRational time_base_q = AV_TIME_BASE_Q;
2413  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2414  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2415  } else if (pkt->duration) {
2416  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2417  } else if(ist->dec_ctx->framerate.num != 0) {
2418  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2419  ist->next_dts += ((int64_t)AV_TIME_BASE *
2420  ist->dec_ctx->framerate.den * ticks) /
2421  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2422  }
2423  break;
2424  }
2425  ist->pts = ist->dts;
2426  ist->next_pts = ist->next_dts;
2427  }
2428  for (i = 0; pkt && i < nb_output_streams; i++) {
2429  OutputStream *ost = output_streams[i];
2430 
2431  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2432  continue;
2433 
2434  do_streamcopy(ist, ost, pkt);
2435  }
2436 
2437  return got_output;
2438 }
2439 
2440 static void print_sdp(void)
2441 {
2442  char sdp[16384];
2443  int i;
2444  int j;
2445  AVIOContext *sdp_pb;
2446  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2447 
2448  if (!avc)
2449  exit_program(1);
2450  for (i = 0, j = 0; i < nb_output_files; i++) {
2451  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2452  avc[j] = output_files[i]->ctx;
2453  j++;
2454  }
2455  }
2456 
2457  if (!j)
2458  goto fail;
2459 
2460  av_sdp_create(avc, j, sdp, sizeof(sdp));
2461 
2462  if (!sdp_filename) {
2463  printf("SDP:\n%s\n", sdp);
2464  fflush(stdout);
2465  } else {
2466  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2467  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2468  } else {
2469  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2470  avio_closep(&sdp_pb);
2471  av_freep(&sdp_filename);
2472  }
2473  }
2474 
2475 fail:
2476  av_freep(&avc);
2477 }
2478 
2479 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2480 {
2481  int i;
2482  for (i = 0; hwaccels[i].name; i++)
2483  if (hwaccels[i].pix_fmt == pix_fmt)
2484  return &hwaccels[i];
2485  return NULL;
2486 }
2487 
2488 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2489 {
2490  InputStream *ist = s->opaque;
2491  const enum AVPixelFormat *p;
2492  int ret;
2493 
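 /* walk the decoder's candidate pixel formats: pick the first hwaccel-capable
  * one that matches the requested hwaccel and initializes successfully,
  * otherwise fall back to the first software format */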
2494  for (p = pix_fmts; *p != -1; p++) {
2495  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2496  const HWAccel *hwaccel;
2497 
2498  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2499  break;
2500 
2501  hwaccel = get_hwaccel(*p);
2502  if (!hwaccel ||
2503  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2504  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2505  continue;
2506 
2507  ret = hwaccel->init(s);
2508  if (ret < 0) {
2509  if (ist->hwaccel_id == hwaccel->id) {
2510  av_log(NULL, AV_LOG_FATAL,
2511  "%s hwaccel requested for input stream #%d:%d, "
2512  "but cannot be initialized.\n", hwaccel->name,
2513  ist->file_index, ist->st->index);
2514  return AV_PIX_FMT_NONE;
2515  }
2516  continue;
2517  }
2518  ist->active_hwaccel_id = hwaccel->id;
2519  ist->hwaccel_pix_fmt = *p;
2520  break;
2521  }
2522 
2523  return *p;
2524 }
2525 
2526 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2527 {
2528  InputStream *ist = s->opaque;
2529 
2530  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2531  return ist->hwaccel_get_buffer(s, frame, flags);
2532 
2533  return avcodec_default_get_buffer2(s, frame, flags);
2534 }
2535 
2536 static int init_input_stream(int ist_index, char *error, int error_len)
2537 {
2538  int ret;
2539  InputStream *ist = input_streams[ist_index];
2540 
2541  if (ist->decoding_needed) {
2542  AVCodec *codec = ist->dec;
2543  if (!codec) {
2544  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2545  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2546  return AVERROR(EINVAL);
2547  }
2548 
2549  ist->dec_ctx->opaque = ist;
2550  ist->dec_ctx->get_format = get_format;
2551  ist->dec_ctx->get_buffer2 = get_buffer;
2552  ist->dec_ctx->thread_safe_callbacks = 1;
2553 
2554  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2555  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2556  (ist->decoding_needed & DECODING_FOR_OST)) {
2557  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2558  if (ist->decoding_needed & DECODING_FOR_FILTER)
2559  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2560  }
2561 
2562  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2563 
2564  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2565  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2566  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2567  if (ret == AVERROR_EXPERIMENTAL)
2568  abort_codec_experimental(codec, 0);
2569 
2570  snprintf(error, error_len,
2571  "Error while opening decoder for input stream "
2572  "#%d:%d : %s",
2573  ist->file_index, ist->st->index, av_err2str(ret));
2574  return ret;
2575  }
2576  assert_avoptions(ist->decoder_opts);
2577  }
2578 
2579  ist->next_pts = AV_NOPTS_VALUE;
2580  ist->next_dts = AV_NOPTS_VALUE;
2581 
2582  return 0;
2583 }
2584 
2585 static InputStream *get_input_stream(OutputStream *ost)
2586 {
2587  if (ost->source_index >= 0)
2588  return input_streams[ost->source_index];
2589  return NULL;
2590 }
2591 
2592 static int compare_int64(const void *a, const void *b)
2593 {
2594  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2595 }
2596 
2597 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2598 {
2599  int ret = 0;
2600 
2601  if (ost->encoding_needed) {
2602  AVCodec *codec = ost->enc;
2603  AVCodecContext *dec = NULL;
2604  InputStream *ist;
2605 
2606  if ((ist = get_input_stream(ost)))
2607  dec = ist->dec_ctx;
2608  if (dec && dec->subtitle_header) {
2609  /* ASS code assumes this buffer is null terminated so add extra byte. */
2610  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2611  if (!ost->enc_ctx->subtitle_header)
2612  return AVERROR(ENOMEM);
2613  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2614  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2615  }
2616  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2617  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2618  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2619  !codec->defaults &&
2620  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2621  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2622  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2623 
2624  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2625  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2626  if (!ost->enc_ctx->hw_frames_ctx)
2627  return AVERROR(ENOMEM);
2628  }
2629 
2630  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2631  if (ret == AVERROR_EXPERIMENTAL)
2632  abort_codec_experimental(codec, 1);
2633  snprintf(error, error_len,
2634  "Error while opening encoder for output stream #%d:%d - "
2635  "maybe incorrect parameters such as bit_rate, rate, width or height",
2636  ost->file_index, ost->index);
2637  return ret;
2638  }
2639  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2640  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2641  av_buffersink_set_frame_size(ost->filter->filter,
2642  ost->enc_ctx->frame_size);
2643 
2644  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2645  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2646  " It takes bits/s as argument, not kbits/s\n");
2647 
2648  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2649  if (ret < 0) {
2650  av_log(NULL, AV_LOG_FATAL,
2651  "Error initializing the output stream codec context.\n");
2652  exit_program(1);
2653  }
2654 
2655  if (ost->enc_ctx->nb_coded_side_data) {
2656  int i;
2657 
2658  ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2659  sizeof(*ost->st->side_data));
2660  if (!ost->st->side_data)
2661  return AVERROR(ENOMEM);
2662 
2663  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2664  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2665  AVPacketSideData *sd_dst = &ost->st->side_data[i];
2666 
2667  sd_dst->data = av_malloc(sd_src->size);
2668  if (!sd_dst->data)
2669  return AVERROR(ENOMEM);
2670  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2671  sd_dst->size = sd_src->size;
2672  sd_dst->type = sd_src->type;
2673  ost->st->nb_side_data++;
2674  }
2675  }
2676 
2677  // copy timebase while removing common factors
2678  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2679  ost->st->codec->codec= ost->enc_ctx->codec;
2680  } else {
2681  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2682  if (ret < 0) {
2683  av_log(NULL, AV_LOG_FATAL,
2684  "Error setting up codec context options.\n");
2685  return ret;
2686  }
2687  // copy timebase while removing common factors
2688  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2689  }
2690 
2691  return ret;
2692 }
2693 
2694 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2695  AVCodecContext *avctx)
2696 {
2697  char *p;
2698  int n = 1, i, size, index = 0;
2699  int64_t t, *pts;
2700 
2701  for (p = kf; *p; p++)
2702  if (*p == ',')
2703  n++;
2704  size = n;
2705  pts = av_malloc_array(size, sizeof(*pts));
2706  if (!pts) {
2707  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2708  exit_program(1);
2709  }
2710 
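 /* kf is a comma-separated list of timestamps; an entry starting with
  * "chapters" expands to the chapter start times, optionally shifted by
  * an offset appended after the keyword */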
2711  p = kf;
2712  for (i = 0; i < n; i++) {
2713  char *next = strchr(p, ',');
2714 
2715  if (next)
2716  *next++ = 0;
2717 
2718  if (!memcmp(p, "chapters", 8)) {
2719 
2720  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2721  int j;
2722 
2723  if (avf->nb_chapters > INT_MAX - size ||
2724  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2725  sizeof(*pts)))) {
2726  av_log(NULL, AV_LOG_FATAL,
2727  "Could not allocate forced key frames array.\n");
2728  exit_program(1);
2729  }
2730  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2731  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2732 
2733  for (j = 0; j < avf->nb_chapters; j++) {
2734  AVChapter *c = avf->chapters[j];
2735  av_assert1(index < size);
2736  pts[index++] = av_rescale_q(c->start, c->time_base,
2737  avctx->time_base) + t;
2738  }
2739 
2740  } else {
2741 
2742  t = parse_time_or_die("force_key_frames", p, 1);
2743  av_assert1(index < size);
2744  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2745 
2746  }
2747 
2748  p = next;
2749  }
2750 
2751  av_assert0(index == size);
2752  qsort(pts, size, sizeof(*pts), compare_int64);
2753  ost->forced_kf_count = size;
2754  ost->forced_kf_pts = pts;
2755 }
2756 
2757 static void report_new_stream(int input_index, AVPacket *pkt)
2758 {
2759  InputFile *file = input_files[input_index];
2760  AVStream *st = file->ctx->streams[pkt->stream_index];
2761 
2762  if (pkt->stream_index < file->nb_streams_warn)
2763  return;
2764  av_log(file->ctx, AV_LOG_WARNING,
2765  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2766  av_get_media_type_string(st->codec->codec_type),
2767  input_index, pkt->stream_index,
2768  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2769  file->nb_streams_warn = pkt->stream_index + 1;
2770 }
2771 
2772 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2773 {
2774  AVDictionaryEntry *e;
2775 
2776  uint8_t *encoder_string;
2777  int encoder_string_len;
2778  int format_flags = 0;
2779  int codec_flags = 0;
2780 
2781  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2782  return;
2783 
2784  e = av_dict_get(of->opts, "fflags", NULL, 0);
2785  if (e) {
2786  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2787  if (!o)
2788  return;
2789  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2790  }
2791  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2792  if (e) {
2793  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2794  if (!o)
2795  return;
2796  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2797  }
2798 
2799  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2800  encoder_string = av_mallocz(encoder_string_len);
2801  if (!encoder_string)
2802  exit_program(1);
2803 
2804  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2805  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2806  else
2807  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2808  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2809  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2810  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2811 }
2812 
2813 static int transcode_init(void)
2814 {
2815  int ret = 0, i, j, k;
2816  AVFormatContext *oc;
2817  OutputStream *ost;
2818  InputStream *ist;
2819  char error[1024] = {0};
2820  int want_sdp = 1;
2821 
2822  for (i = 0; i < nb_filtergraphs; i++) {
2823  FilterGraph *fg = filtergraphs[i];
2824  for (j = 0; j < fg->nb_outputs; j++) {
2825  OutputFilter *ofilter = fg->outputs[j];
2826  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2827  continue;
2828  if (fg->nb_inputs != 1)
2829  continue;
2830  for (k = nb_input_streams-1; k >= 0 ; k--)
2831  if (fg->inputs[0]->ist == input_streams[k])
2832  break;
2833  ofilter->ost->source_index = k;
2834  }
2835  }
2836 
2837  /* init framerate emulation */
2838  for (i = 0; i < nb_input_files; i++) {
2839  InputFile *ifile = input_files[i];
2840  if (ifile->rate_emu)
2841  for (j = 0; j < ifile->nb_streams; j++)
2842  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2843  }
2844 
2845  /* for each output stream, we compute the right encoding parameters */
2846  for (i = 0; i < nb_output_streams; i++) {
2847  AVCodecContext *enc_ctx;
2848  AVCodecContext *dec_ctx = NULL;
2849  ost = output_streams[i];
2850  oc = output_files[ost->file_index]->ctx;
2851  ist = get_input_stream(ost);
2852 
2853  if (ost->attachment_filename)
2854  continue;
2855 
2856  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2857 
2858  if (ist) {
2859  dec_ctx = ist->dec_ctx;
2860 
2861  ost->st->disposition = ist->st->disposition;
2862  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2863  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2864  } else {
2865  for (j=0; j<oc->nb_streams; j++) {
2866  AVStream *st = oc->streams[j];
2867  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2868  break;
2869  }
2870  if (j == oc->nb_streams)
2871  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2872  ost->st->disposition = AV_DISPOSITION_DEFAULT;
2873  }
2874 
2875  if (ost->stream_copy) {
2876  AVRational sar;
2877  uint64_t extra_size;
2878 
2879  av_assert0(ist && !ost->filter);
2880 
2881  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2882 
2883  if (extra_size > INT_MAX) {
2884  return AVERROR(EINVAL);
2885  }
2886 
2887  /* if stream_copy is selected, no need to decode or encode */
2888  enc_ctx->codec_id = dec_ctx->codec_id;
2889  enc_ctx->codec_type = dec_ctx->codec_type;
2890 
2891  if (!enc_ctx->codec_tag) {
2892  unsigned int codec_tag;
2893  if (!oc->oformat->codec_tag ||
2894  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2895  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2896  enc_ctx->codec_tag = dec_ctx->codec_tag;
2897  }
2898 
2899  enc_ctx->bit_rate = dec_ctx->bit_rate;
2900  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2901  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2902  enc_ctx->field_order = dec_ctx->field_order;
2903  if (dec_ctx->extradata_size) {
2904  enc_ctx->extradata = av_mallocz(extra_size);
2905  if (!enc_ctx->extradata) {
2906  return AVERROR(ENOMEM);
2907  }
2908  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2909  }
2910  enc_ctx->extradata_size= dec_ctx->extradata_size;
2911  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2912 
2913  enc_ctx->time_base = ist->st->time_base;
2914  /*
2915  * AVI is a special case here because it supports variable fps but
2916  * having the fps and timebase differ significantly adds quite some
2917  * overhead
2918  */
2919  if(!strcmp(oc->oformat->name, "avi")) {
2920  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2921  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2922  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2923  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2924  || copy_tb==2){
2925  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2926  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2927  enc_ctx->ticks_per_frame = 2;
2928  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2929  && av_q2d(ist->st->time_base) < 1.0/500
2930  || copy_tb==0){
2931  enc_ctx->time_base = dec_ctx->time_base;
2932  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2933  enc_ctx->time_base.den *= 2;
2934  enc_ctx->ticks_per_frame = 2;
2935  }
2936  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2937  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2938  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2939  && strcmp(oc->oformat->name, "f4v")
2940  ) {
2941  if( copy_tb<0 && dec_ctx->time_base.den
2942  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2943  && av_q2d(ist->st->time_base) < 1.0/500
2944  || copy_tb==0){
2945  enc_ctx->time_base = dec_ctx->time_base;
2946  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2947  }
2948  }
2949  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2950  && dec_ctx->time_base.num < dec_ctx->time_base.den
2951  && dec_ctx->time_base.num > 0
2952  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2953  enc_ctx->time_base = dec_ctx->time_base;
2954  }
2955 
2956  if (!ost->frame_rate.num)
2957  ost->frame_rate = ist->framerate;
2958  if(ost->frame_rate.num)
2959  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2960 
2961  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2962  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2963 
2964  if (ist->st->nb_side_data) {
2965  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2966  sizeof(*ist->st->side_data));
2967  if (!ost->st->side_data)
2968  return AVERROR(ENOMEM);
2969 
2970  ost->st->nb_side_data = 0;
2971  for (j = 0; j < ist->st->nb_side_data; j++) {
2972  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2973  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2974 
2975  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2976  continue;
2977 
2978  sd_dst->data = av_malloc(sd_src->size);
2979  if (!sd_dst->data)
2980  return AVERROR(ENOMEM);
2981  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2982  sd_dst->size = sd_src->size;
2983  sd_dst->type = sd_src->type;
2984  ost->st->nb_side_data++;
2985  }
2986  }
2987 
2988  ost->parser = av_parser_init(enc_ctx->codec_id);
2989 
2990  switch (enc_ctx->codec_type) {
2991  case AVMEDIA_TYPE_AUDIO:
2992  if (audio_volume != 256) {
2993  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2994  exit_program(1);
2995  }
2996  enc_ctx->channel_layout = dec_ctx->channel_layout;
2997  enc_ctx->sample_rate = dec_ctx->sample_rate;
2998  enc_ctx->channels = dec_ctx->channels;
2999  enc_ctx->frame_size = dec_ctx->frame_size;
3000  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3001  enc_ctx->block_align = dec_ctx->block_align;
3002  enc_ctx->initial_padding = dec_ctx->delay;
3003  enc_ctx->profile = dec_ctx->profile;
3004 #if FF_API_AUDIOENC_DELAY
3005  enc_ctx->delay = dec_ctx->delay;
3006 #endif
3007  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3008  enc_ctx->block_align= 0;
3009  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3010  enc_ctx->block_align= 0;
3011  break;
3012  case AVMEDIA_TYPE_VIDEO:
3013  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3014  enc_ctx->width = dec_ctx->width;
3015  enc_ctx->height = dec_ctx->height;
3016  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3017  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3018  sar =
3019  av_mul_q(ost->frame_aspect_ratio,
3020  (AVRational){ enc_ctx->height, enc_ctx->width });
3021  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3022  "with stream copy may produce invalid files\n");
3023  }
3024  else if (ist->st->sample_aspect_ratio.num)
3025  sar = ist->st->sample_aspect_ratio;
3026  else
3027  sar = dec_ctx->sample_aspect_ratio;
3028  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3029  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3030  ost->st->r_frame_rate = ist->st->r_frame_rate;
3031  break;
3032  case AVMEDIA_TYPE_SUBTITLE:
3033  enc_ctx->width = dec_ctx->width;
3034  enc_ctx->height = dec_ctx->height;
3035  break;
3036  case AVMEDIA_TYPE_UNKNOWN:
3037  case AVMEDIA_TYPE_DATA:
3038  case AVMEDIA_TYPE_ATTACHMENT:
3039  break;
3040  default:
3041  abort();
3042  }
3043  } else {
3044  if (!ost->enc)
3045  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3046  if (!ost->enc) {
3047  /* should only happen when a default codec is not present. */
3048  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3049  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3050  ret = AVERROR(EINVAL);
3051  goto dump_format;
3052  }
3053 
3054  set_encoder_id(output_files[ost->file_index], ost);
3055 
3056 #if CONFIG_LIBMFX
3057  if (qsv_transcode_init(ost))
3058  exit_program(1);
3059 #endif
3060 
3061  if (!ost->filter &&
3062  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3063  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3064  FilterGraph *fg;
3065  fg = init_simple_filtergraph(ist, ost);
3066  if (configure_filtergraph(fg)) {
3067  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3068  exit_program(1);
3069  }
3070  }
3071 
3072  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3073  if (!ost->frame_rate.num)
3074  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3075  if (ist && !ost->frame_rate.num)
3076  ost->frame_rate = ist->framerate;
3077  if (ist && !ost->frame_rate.num)
3078  ost->frame_rate = ist->st->r_frame_rate;
3079  if (ist && !ost->frame_rate.num) {
3080  ost->frame_rate = (AVRational){25, 1};
3081  av_log(NULL, AV_LOG_WARNING,
3082  "No information "
3083  "about the input framerate is available. Falling "
3084  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3085  "if you want a different framerate.\n",
3086  ost->file_index, ost->index);
3087  }
3088 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3089  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3090  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3091  ost->frame_rate = ost->enc->supported_framerates[idx];
3092  }
3093  // reduce frame rate for mpeg4 to be within the spec limits
3094  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3095  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3096  ost->frame_rate.num, ost->frame_rate.den, 65535);
3097  }
3098  }
3099 
3100  switch (enc_ctx->codec_type) {
3101  case AVMEDIA_TYPE_AUDIO:
3102  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3103  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3104  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3105  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3106  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3107  break;
3108  case AVMEDIA_TYPE_VIDEO:
3109  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3110  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3111  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3112  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3113  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3114  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3115  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3116  }
3117  for (j = 0; j < ost->forced_kf_count; j++)
3118  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3119  AV_TIME_BASE_Q,
3120  enc_ctx->time_base);
3121 
3122  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3123  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3124  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3125  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3126  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3127  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3128  if (!strncmp(ost->enc->name, "libx264", 7) &&
3129  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3130  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3131  av_log(NULL, AV_LOG_WARNING,
3132  "No pixel format specified, %s for H.264 encoding chosen.\n"
3133  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3134  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3135  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3136  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3137  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3138  av_log(NULL, AV_LOG_WARNING,
3139  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3140  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3141  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3142  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3143 
3144  ost->st->avg_frame_rate = ost->frame_rate;
3145 
3146  if (!dec_ctx ||
3147  enc_ctx->width != dec_ctx->width ||
3148  enc_ctx->height != dec_ctx->height ||
3149  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3150  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3151  }
3152 
3153  if (ost->forced_keyframes) {
3154  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3155  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3156  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3157  if (ret < 0) {
3158  av_log(NULL, AV_LOG_ERROR,
3159  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3160  return ret;
3161  }
3162  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3163  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3164  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3165  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3166 
3167  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3168  // parse it only for static kf timings
3169  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3170  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3171  }
3172  }
3173  break;
3174  case AVMEDIA_TYPE_SUBTITLE:
3175  enc_ctx->time_base = (AVRational){1, 1000};
3176  if (!enc_ctx->width) {
3177  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3178  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3179  }
3180  break;
3181  case AVMEDIA_TYPE_DATA:
3182  break;
3183  default:
3184  abort();
3185  break;
3186  }
3187  }
3188 
3189  if (ost->disposition) {
3190  static const AVOption opts[] = {
3191  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3192  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3193  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3194  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3195  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3196  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3197  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3198  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3199  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3200  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3201  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3202  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3203  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3204  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3205  { NULL },
3206  };
3207  static const AVClass class = {
3208  .class_name = "",
3209  .item_name = av_default_item_name,
3210  .option = opts,
3211  .version = LIBAVUTIL_VERSION_INT,
3212  };
3213  const AVClass *pclass = &class;
3214 
3215  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3216  if (ret < 0)
3217  goto dump_format;
3218  }
3219  }
3220 
3221  /* init input streams */
3222  for (i = 0; i < nb_input_streams; i++)
3223  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3224  for (i = 0; i < nb_output_streams; i++) {
3225  ost = output_streams[i];
3226  avcodec_close(ost->enc_ctx);
3227  }
3228  goto dump_format;
3229  }
3230 
3231  /* open each encoder */
3232  for (i = 0; i < nb_output_streams; i++) {
3233  ret = init_output_stream(output_streams[i], error, sizeof(error));
3234  if (ret < 0)
3235  goto dump_format;
3236  }
3237 
3238  /* discard unused programs */
3239  for (i = 0; i < nb_input_files; i++) {
3240  InputFile *ifile = input_files[i];
3241  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3242  AVProgram *p = ifile->ctx->programs[j];
3243  int discard = AVDISCARD_ALL;
3244 
3245  for (k = 0; k < p->nb_stream_indexes; k++)
3246  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3247  discard = AVDISCARD_DEFAULT;
3248  break;
3249  }
3250  p->discard = discard;
3251  }
3252  }
3253 
3254  /* open files and write file headers */
3255  for (i = 0; i < nb_output_files; i++) {
3256  oc = output_files[i]->ctx;
3257  oc->interrupt_callback = int_cb;
3258  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3259  snprintf(error, sizeof(error),
3260  "Could not write header for output file #%d "
3261  "(incorrect codec parameters ?): %s",
3262  i, av_err2str(ret));
3263  ret = AVERROR(EINVAL);
3264  goto dump_format;
3265  }
3266 // assert_avoptions(output_files[i]->opts);
3267  if (strcmp(oc->oformat->name, "rtp")) {
3268  want_sdp = 0;
3269  }
3270  }
3271 
3272  dump_format:
3273  /* dump the file output parameters - cannot be done before in case
3274  of stream copy */
3275  for (i = 0; i < nb_output_files; i++) {
3276  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3277  }
3278 
3279  /* dump the stream mapping */
3280  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3281  for (i = 0; i < nb_input_streams; i++) {
3282  ist = input_streams[i];
3283 
3284  for (j = 0; j < ist->nb_filters; j++) {
3285  if (ist->filters[j]->graph->graph_desc) {
3286  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3287  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3288  ist->filters[j]->name);
3289  if (nb_filtergraphs > 1)
3290  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3291  av_log(NULL, AV_LOG_INFO, "\n");
3292  }
3293  }
3294  }
3295 
3296  for (i = 0; i < nb_output_streams; i++) {
3297  ost = output_streams[i];
3298 
3299  if (ost->attachment_filename) {
3300  /* an attached file */
3301  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3302  ost->attachment_filename, ost->file_index, ost->index);
3303  continue;
3304  }
3305 
3306  if (ost->filter && ost->filter->graph->graph_desc) {
3307  /* output from a complex graph */
3308  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3309  if (nb_filtergraphs > 1)
3310  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3311 
3312  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3313  ost->index, ost->enc ? ost->enc->name : "?");
3314  continue;
3315  }
3316 
3317  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3318  input_streams[ost->source_index]->file_index,
3319  input_streams[ost->source_index]->st->index,
3320  ost->file_index,
3321  ost->index);
3322  if (ost->sync_ist != input_streams[ost->source_index])
3323  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3324  ost->sync_ist->file_index,
3325  ost->sync_ist->st->index);
3326  if (ost->stream_copy)
3327  av_log(NULL, AV_LOG_INFO, " (copy)");
3328  else {
3329  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3330  const AVCodec *out_codec = ost->enc;
3331  const char *decoder_name = "?";
3332  const char *in_codec_name = "?";
3333  const char *encoder_name = "?";
3334  const char *out_codec_name = "?";
3335  const AVCodecDescriptor *desc;
3336 
3337  if (in_codec) {
3338  decoder_name = in_codec->name;
3339  desc = avcodec_descriptor_get(in_codec->id);
3340  if (desc)
3341  in_codec_name = desc->name;
3342  if (!strcmp(decoder_name, in_codec_name))
3343  decoder_name = "native";
3344  }
3345 
3346  if (out_codec) {
3347  encoder_name = out_codec->name;
3348  desc = avcodec_descriptor_get(out_codec->id);
3349  if (desc)
3350  out_codec_name = desc->name;
3351  if (!strcmp(encoder_name, out_codec_name))
3352  encoder_name = "native";
3353  }
3354 
3355  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3356  in_codec_name, decoder_name,
3357  out_codec_name, encoder_name);
3358  }
3359  av_log(NULL, AV_LOG_INFO, "\n");
3360  }
3361 
3362  if (ret) {
3363  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3364  return ret;
3365  }
3366 
3367  if (sdp_filename || want_sdp) {
3368  print_sdp();
3369  }
3370 
3371  transcode_init_done = 1;
3372 
3373  return 0;
3374 }
3375 
3376 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3377 static int need_output(void)
3378 {
3379  int i;
3380 
3381  for (i = 0; i < nb_output_streams; i++) {
3382  OutputStream *ost = output_streams[i];
3383  OutputFile *of = output_files[ost->file_index];
3384  AVFormatContext *os = output_files[ost->file_index]->ctx;
3385 
3386  if (ost->finished ||
3387  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3388  continue;
3389  if (ost->frame_number >= ost->max_frames) {
3390  int j;
3391  for (j = 0; j < of->ctx->nb_streams; j++)
3392  close_output_stream(output_streams[of->ost_index + j]);
3393  continue;
3394  }
3395 
3396  return 1;
3397  }
3398 
3399  return 0;
3400 }
3401 
3402 /**
3403  * Select the output stream to process.
3404  *
3405  * @return selected output stream, or NULL if none available
3406  */
3407 static OutputStream *choose_output(void)
3408 {
3409  int i;
3410  int64_t opts_min = INT64_MAX;
3411  OutputStream *ost_min = NULL;
3412 
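 /* pick the unfinished stream with the smallest current dts (converted to
  * AV_TIME_BASE units) so the output streams advance roughly in step */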
3413  for (i = 0; i < nb_output_streams; i++) {
3414  OutputStream *ost = output_streams[i];
3415  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3416  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3417  AV_TIME_BASE_Q);
3418  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3419  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3420 
3421  if (!ost->finished && opts < opts_min) {
3422  opts_min = opts;
3423  ost_min = ost->unavailable ? NULL : ost;
3424  }
3425  }
3426  return ost_min;
3427 }
3428 
3429 static void set_tty_echo(int on)
3430 {
3431 #if HAVE_TERMIOS_H
3432  struct termios tty;
3433  if (tcgetattr(0, &tty) == 0) {
3434  if (on) tty.c_lflag |= ECHO;
3435  else tty.c_lflag &= ~ECHO;
3436  tcsetattr(0, TCSANOW, &tty);
3437  }
3438 #endif
3439 }
3440 
3441 static int check_keyboard_interaction(int64_t cur_time)
3442 {
3443  int i, ret, key;
3444  static int64_t last_time;
3445  if (received_nb_signals)
3446  return AVERROR_EXIT;
3447  /* read_key() returns 0 on EOF */
3448  if(cur_time - last_time >= 100000 && !run_as_daemon){
3449  key = read_key();
3450  last_time = cur_time;
3451  }else
3452  key = -1;
3453  if (key == 'q')
3454  return AVERROR_EXIT;
3455  if (key == '+') av_log_set_level(av_log_get_level()+10);
3456  if (key == '-') av_log_set_level(av_log_get_level()-10);
3457  if (key == 's') qp_hist ^= 1;
3458  if (key == 'h'){
3459  if (do_hex_dump){
3460  do_hex_dump = do_pkt_dump = 0;
3461  } else if(do_pkt_dump){
3462  do_hex_dump = 1;
3463  } else
3464  do_pkt_dump = 1;
3465  av_log_set_level(AV_LOG_DEBUG);
3466  }
3467  if (key == 'c' || key == 'C'){
3468  char buf[4096], target[64], command[256], arg[256] = {0};
3469  double time;
3470  int k, n = 0;
3471  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3472  i = 0;
3473  set_tty_echo(1);
3474  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3475  if (k > 0)
3476  buf[i++] = k;
3477  buf[i] = 0;
3478  set_tty_echo(0);
3479  fprintf(stderr, "\n");
3480  if (k > 0 &&
3481  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3482  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3483  target, time, command, arg);
3484  for (i = 0; i < nb_filtergraphs; i++) {
3485  FilterGraph *fg = filtergraphs[i];
3486  if (fg->graph) {
3487  if (time < 0) {
3488  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3489  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3490  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3491  } else if (key == 'c') {
3492  fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3493  ret = AVERROR_PATCHWELCOME;
3494  } else {
3495  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3496  if (ret < 0)
3497  fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3498  }
3499  }
3500  }
3501  } else {
3502  av_log(NULL, AV_LOG_ERROR,
3503  "Parse error, at least 3 arguments were expected, "
3504  "only %d given in string '%s'\n", n, buf);
3505  }
3506  }
3507  if (key == 'd' || key == 'D'){
3508  int debug=0;
3509  if(key == 'D') {
3510  debug = input_streams[0]->st->codec->debug<<1;
3511  if(!debug) debug = 1;
3512  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3513  debug += debug;
3514  }else{
3515  char buf[32];
3516  int k = 0;
3517  i = 0;
3518  set_tty_echo(1);
3519  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3520  if (k > 0)
3521  buf[i++] = k;
3522  buf[i] = 0;
3523  set_tty_echo(0);
3524  fprintf(stderr, "\n");
3525  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3526  fprintf(stderr,"error parsing debug value\n");
3527  }
3528  for(i=0;i<nb_input_streams;i++) {
3529  input_streams[i]->st->codec->debug = debug;
3530  }
3531  for(i=0;i<nb_output_streams;i++) {
3532  OutputStream *ost = output_streams[i];
3533  ost->enc_ctx->debug = debug;
3534  }
3535  if(debug) av_log_set_level(AV_LOG_DEBUG);
3536  fprintf(stderr,"debug=%d\n", debug);
3537  }
3538  if (key == '?'){
3539  fprintf(stderr, "key function\n"
3540  "? show this help\n"
3541  "+ increase verbosity\n"
3542  "- decrease verbosity\n"
3543  "c Send command to first matching filter supporting it\n"
3544  "C Send/Que command to all matching filters\n"
3545  "D cycle through available debug modes\n"
3546  "h dump packets/hex press to cycle through the 3 states\n"
3547  "q quit\n"
3548  "s Show QP histogram\n"
3549  );
3550  }
3551  return 0;
3552 }
3553 
3554 #if HAVE_PTHREADS
3555 static void *input_thread(void *arg)
3556 {
3557  InputFile *f = arg;
3558  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3559  int ret = 0;
3560 
3561  while (1) {
3562  AVPacket pkt;
3563  ret = av_read_frame(f->ctx, &pkt);
3564 
3565  if (ret == AVERROR(EAGAIN)) {
3566  av_usleep(10000);
3567  continue;
3568  }
3569  if (ret < 0) {
3570  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3571  break;
3572  }
3573  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3574  if (flags && ret == AVERROR(EAGAIN)) {
3575  flags = 0;
3576  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3577  av_log(f->ctx, AV_LOG_WARNING,
3578  "Thread message queue blocking; consider raising the "
3579  "thread_queue_size option (current value: %d)\n",
3580  f->thread_queue_size);
3581  }
3582  if (ret < 0) {
3583  if (ret != AVERROR_EOF)
3584  av_log(f->ctx, AV_LOG_ERROR,
3585  "Unable to send packet to main thread: %s\n",
3586  av_err2str(ret));
3587  av_packet_unref(&pkt);
3588  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3589  break;
3590  }
3591  }
3592 
3593  return NULL;
3594 }
3595 
3596 static void free_input_threads(void)
3597 {
3598  int i;
3599 
3600  for (i = 0; i < nb_input_files; i++) {
3601  InputFile *f = input_files[i];
3602  AVPacket pkt;
3603 
3604  if (!f || !f->in_thread_queue)
3605  continue;
3606  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3607  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3608  av_packet_unref(&pkt);
3609 
3610  pthread_join(f->thread, NULL);
3611  f->joined = 1;
3612  av_thread_message_queue_free(&f->in_thread_queue);
3613  }
3614 }
3615 
3616 static int init_input_threads(void)
3617 {
3618  int i, ret;
3619 
3620  if (nb_input_files == 1)
3621  return 0;
3622 
3623  for (i = 0; i < nb_input_files; i++) {
3624  InputFile *f = input_files[i];
3625 
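 /* Non-seekable inputs (and, when there is no AVIOContext, any demuxer other
  * than the lavfi pseudo-demuxer) are read in non-blocking mode below,
  * presumably so a stalled live source cannot block the message queue
  * indefinitely. */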
3626  if (f->ctx->pb ? !f->ctx->pb->seekable :
3627  strcmp(f->ctx->iformat->name, "lavfi"))
3628  f->non_blocking = 1;
3629  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3630  f->thread_queue_size, sizeof(AVPacket));
3631  if (ret < 0)
3632  return ret;
3633 
3634  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3635  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3636  av_thread_message_queue_free(&f->in_thread_queue);
3637  return AVERROR(ret);
3638  }
3639  }
3640  return 0;
3641 }
3642 
3643 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3644 {
3645  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3646  f->non_blocking ?
3647  AV_THREAD_MESSAGE_NONBLOCK : 0);
3648 }
3649 #endif
3650 
3651 static int get_input_packet(InputFile *f, AVPacket *pkt)
3652 {
3653  if (f->rate_emu) {
3654  int i;
3655  for (i = 0; i < f->nb_streams; i++) {
3656  InputStream *ist = input_streams[f->ist_index + i];
3657  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3658  int64_t now = av_gettime_relative() - ist->start;
3659  if (pts > now)
3660  return AVERROR(EAGAIN);
3661  }
3662  }
3663 
3664 #if HAVE_PTHREADS
3665  if (nb_input_files > 1)
3666  return get_input_packet_mt(f, pkt);
3667 #endif
3668  return av_read_frame(f->ctx, pkt);
3669 }
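 /* With -re (f->rate_emu set) the read above is throttled: a packet is only
  * returned once the wall-clock time elapsed since ist->start has caught up
  * with the stream's last dts (both expressed in AV_TIME_BASE units);
  * otherwise EAGAIN is returned and the caller retries later. */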
3670 
3671 static int got_eagain(void)
3672 {
3673  int i;
3674  for (i = 0; i < nb_output_streams; i++)
3675  if (output_streams[i]->unavailable)
3676  return 1;
3677  return 0;
3678 }
3679 
3680 static void reset_eagain(void)
3681 {
3682  int i;
3683  for (i = 0; i < nb_input_files; i++)
3684  input_files[i]->eagain = 0;
3685  for (i = 0; i < nb_output_streams; i++)
3686  output_streams[i]->unavailable = 0;
3687 }
3688 
3689 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3690 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3691  AVRational time_base)
3692 {
3693  int ret;
3694 
3695  if (!*duration) {
3696  *duration = tmp;
3697  return tmp_time_base;
3698  }
3699 
3700  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3701  if (ret < 0) {
3702  *duration = tmp;
3703  return tmp_time_base;
3704  }
3705 
3706  return time_base;
3707 }
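 /* Worked example with illustrative values: if *duration is 900 in time base
  * {1, 1000} (0.9 s) and tmp is 48000 in {1, 48000} (1.0 s), av_compare_ts()
  * reports the stored duration as the smaller one, so *duration becomes 48000
  * and {1, 48000} is returned as the new time base. */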
3708 
3709 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3710 {
3711  InputStream *ist;
3712  AVCodecContext *avctx;
3713  int i, ret, has_audio = 0;
3714  int64_t duration = 0;
3715 
3716  ret = av_seek_frame(is, -1, is->start_time, 0);
3717  if (ret < 0)
3718  return ret;
3719 
3720  for (i = 0; i < ifile->nb_streams; i++) {
3721  ist = input_streams[ifile->ist_index + i];
3722  avctx = ist->dec_ctx;
3723 
3724  // flush decoders
3725  if (ist->decoding_needed) {
3726  process_input_packet(ist, NULL, 1);
3727  avcodec_flush_buffers(avctx);
3728  }
3729 
3730  /* duration is the length of the last frame in a stream;
3731  * when an audio stream is present we don't care about the
3732  * last video frame's length because it is not exactly defined */
3733  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3734  has_audio = 1;
3735  }
3736 
3737  for (i = 0; i < ifile->nb_streams; i++) {
3738  ist = input_streams[ifile->ist_index + i];
3739  avctx = ist->dec_ctx;
3740 
3741  if (has_audio) {
3742  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3743  AVRational sample_rate = {1, avctx->sample_rate};
3744 
3745  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3746  } else
3747  continue;
3748  } else {
3749  if (ist->framerate.num) {
3750  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3751  } else if (ist->st->avg_frame_rate.num) {
3752  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3753  } else duration = 1;
3754  }
3755  if (!ifile->duration)
3756  ifile->time_base = ist->st->time_base;
3757  /* max_pts - min_pts is the duration of the stream without the last
3758  * frame; adding the last frame's duration set above gives the total */
3759  duration += ist->max_pts - ist->min_pts;
3760  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3761  ifile->time_base);
3762  }
3763 
3764  if (ifile->loop > 0)
3765  ifile->loop--;
3766 
3767  return ret;
3768 }
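 /* seek_to_start() backs the -stream_loop option: whenever the demuxer hits
  * EOF while ifile->loop is still set, process_input() below seeks back to the
  * start, and the accumulated ifile->duration computed here is later added to
  * every packet timestamp so the looped iterations do not overlap. */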
3769 
3770 /*
3771  * Return
3772  * - 0 -- one packet was read and processed
3773  * - AVERROR(EAGAIN) -- no packets were available for the selected file,
3774  * this function should be called again
3775  * - AVERROR_EOF -- this function should not be called again
3776  */
3777 static int process_input(int file_index)
3778 {
3779  InputFile *ifile = input_files[file_index];
3780  AVFormatContext *is;
3781  InputStream *ist;
3782  AVPacket pkt;
3783  int ret, i, j;
3784  int64_t duration;
3785  int64_t pkt_dts;
3786 
3787  is = ifile->ctx;
3788  ret = get_input_packet(ifile, &pkt);
3789 
3790  if (ret == AVERROR(EAGAIN)) {
3791  ifile->eagain = 1;
3792  return ret;
3793  }
3794  if (ret < 0 && ifile->loop) {
3795  if ((ret = seek_to_start(ifile, is)) < 0)
3796  return ret;
3797  ret = get_input_packet(ifile, &pkt);
3798  }
3799  if (ret < 0) {
3800  if (ret != AVERROR_EOF) {
3801  print_error(is->filename, ret);
3802  if (exit_on_error)
3803  exit_program(1);
3804  }
3805 
3806  for (i = 0; i < ifile->nb_streams; i++) {
3807  ist = input_streams[ifile->ist_index + i];
3808  if (ist->decoding_needed) {
3809  ret = process_input_packet(ist, NULL, 0);
3810  if (ret>0)
3811  return 0;
3812  }
3813 
3814  /* mark all outputs that don't go through lavfi as finished */
3815  for (j = 0; j < nb_output_streams; j++) {
3816  OutputStream *ost = output_streams[j];
3817 
3818  if (ost->source_index == ifile->ist_index + i &&
3819  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3820  finish_output_stream(ost);
3821  }
3822  }
3823 
3824  ifile->eof_reached = 1;
3825  return AVERROR(EAGAIN);
3826  }
3827 
3828  reset_eagain();
3829 
3830  if (do_pkt_dump) {
3831  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3832  is->streams[pkt.stream_index]);
3833  }
3834  /* the following test is needed in case new streams appear
3835  dynamically in the stream: we ignore them */
3836  if (pkt.stream_index >= ifile->nb_streams) {
3837  report_new_stream(file_index, &pkt);
3838  goto discard_packet;
3839  }
3840 
3841  ist = input_streams[ifile->ist_index + pkt.stream_index];
3842 
3843  ist->data_size += pkt.size;
3844  ist->nb_packets++;
3845 
3846  if (ist->discard)
3847  goto discard_packet;
3848 
3849  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3850  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3851  exit_program(1);
3852  }
3853 
3854  if (debug_ts) {
3855  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3856  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3857  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3858  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3859  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3860  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3861  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3862  av_ts2str(input_files[ist->file_index]->ts_offset),
3863  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3864  }
3865 
3866  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3867  int64_t stime, stime2;
3868  // Correcting the start time based on the enabled streams
3869  // FIXME ideally this should be done before the first use of the start time, but we do not know which streams are enabled at that point,
3870  // so we instead do it here as part of discontinuity handling
3871  if ( ist->next_dts == AV_NOPTS_VALUE
3872  && ifile->ts_offset == -is->start_time
3873  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3874  int64_t new_start_time = INT64_MAX;
3875  for (i=0; i<is->nb_streams; i++) {
3876  AVStream *st = is->streams[i];
3877  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3878  continue;
3879  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3880  }
3881  if (new_start_time > is->start_time) {
3882  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3883  ifile->ts_offset = -new_start_time;
3884  }
3885  }
3886 
3887  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3888  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3889  ist->wrap_correction_done = 1;
3890 
3891  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3892  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3893  ist->wrap_correction_done = 0;
3894  }
3895  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3896  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3897  ist->wrap_correction_done = 0;
3898  }
3899  }
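 /* Illustrative numbers for the wrap correction above: MPEG-TS uses 33-bit
  * timestamps, so pts_wrap_bits is 33 and values wrap every 2^33 ticks
  * (about 26.5 hours at 90 kHz). If the file starts close to the wrap point,
  * packets whose dts/pts lie more than half a wrap period (2^32 ticks) above
  * the start time are treated as pre-wrap values and shifted down by 2^33. */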
3900 
3901  /* add the stream-global side data to the first packet */
3902  if (ist->nb_packets == 1) {
3903  if (ist->st->nb_side_data)
3904  av_packet_split_side_data(&pkt);
3905  for (i = 0; i < ist->st->nb_side_data; i++) {
3906  AVPacketSideData *src_sd = &ist->st->side_data[i];
3907  uint8_t *dst_data;
3908 
3909  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3910  continue;
3911  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3912  continue;
3913 
3914  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3915  if (!dst_data)
3916  exit_program(1);
3917 
3918  memcpy(dst_data, src_sd->data, src_sd->size);
3919  }
3920  }
3921 
3922  if (pkt.dts != AV_NOPTS_VALUE)
3923  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3924  if (pkt.pts != AV_NOPTS_VALUE)
3925  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3926 
3927  if (pkt.pts != AV_NOPTS_VALUE)
3928  pkt.pts *= ist->ts_scale;
3929  if (pkt.dts != AV_NOPTS_VALUE)
3930  pkt.dts *= ist->ts_scale;
3931 
3932  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3933  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3934  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3935  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3936  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3937  int64_t delta = pkt_dts - ifile->last_ts;
3938  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3939  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3940  ifile->ts_offset -= delta;
3942  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3943  delta, ifile->ts_offset);
3944  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3945  if (pkt.pts != AV_NOPTS_VALUE)
3946  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3947  }
3948  }
3949 
3950  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3951  if (pkt.pts != AV_NOPTS_VALUE) {
3952  pkt.pts += duration;
3953  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3954  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3955  }
3956 
3957  if (pkt.dts != AV_NOPTS_VALUE)
3958  pkt.dts += duration;
3959 
3960  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3961  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3962  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3963  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3964  !copy_ts) {
3965  int64_t delta = pkt_dts - ist->next_dts;
3966  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3967  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3968  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3969  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3970  ifile->ts_offset -= delta;
3972  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3973  delta, ifile->ts_offset);
3974  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3975  if (pkt.pts != AV_NOPTS_VALUE)
3976  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3977  }
3978  } else {
3979  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3980  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3981  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3982  pkt.dts = AV_NOPTS_VALUE;
3983  }
3984  if (pkt.pts != AV_NOPTS_VALUE){
3985  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3986  delta = pkt_pts - ist->next_dts;
3987  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3988  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3989  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3990  pkt.pts = AV_NOPTS_VALUE;
3991  }
3992  }
3993  }
3994  }
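 /* Two thresholds are at work above: for formats flagged AVFMT_TS_DISCONT
  * (e.g. MPEG-TS) a jump larger than dts_delta_threshold seconds adjusts
  * ifile->ts_offset so playback stays continuous, while for other formats
  * timestamps further than dts_error_threshold seconds from the predicted
  * next_dts are considered broken and dropped (set to AV_NOPTS_VALUE). */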
3995 
3996  if (pkt.dts != AV_NOPTS_VALUE)
3997  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3998 
3999  if (debug_ts) {
4000  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4001  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4002  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4003  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4004  av_ts2str(input_files[ist->file_index]->ts_offset),
4005  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4006  }
4007 
4008  sub2video_heartbeat(ist, pkt.pts);
4009 
4010  process_input_packet(ist, &pkt, 0);
4011 
4012 discard_packet:
4013  av_packet_unref(&pkt);
4014 
4015  return 0;
4016 }
4017 
4018 /**
4019  * Perform a step of transcoding for the specified filter graph.
4020  *
4021  * @param[in] graph filter graph to consider
4022  * @param[out] best_ist input stream from which reading a frame would allow transcoding to continue
4023  * @return 0 for success, <0 for error
4024  */
4025 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4026 {
4027  int i, ret;
4028  int nb_requests, nb_requests_max = 0;
4029  InputFilter *ifilter;
4030  InputStream *ist;
4031 
4032  *best_ist = NULL;
4033  ret = avfilter_graph_request_oldest(graph->graph);
4034  if (ret >= 0)
4035  return reap_filters(0);
4036 
4037  if (ret == AVERROR_EOF) {
4038  ret = reap_filters(1);
4039  for (i = 0; i < graph->nb_outputs; i++)
4040  close_output_stream(graph->outputs[i]->ost);
4041  return ret;
4042  }
4043  if (ret != AVERROR(EAGAIN))
4044  return ret;
4045 
4046  for (i = 0; i < graph->nb_inputs; i++) {
4047  ifilter = graph->inputs[i];
4048  ist = ifilter->ist;
4049  if (input_files[ist->file_index]->eagain ||
4050  input_files[ist->file_index]->eof_reached)
4051  continue;
4052  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4053  if (nb_requests > nb_requests_max) {
4054  nb_requests_max = nb_requests;
4055  *best_ist = ist;
4056  }
4057  }
4058 
4059  if (!*best_ist)
4060  for (i = 0; i < graph->nb_outputs; i++)
4061  graph->outputs[i]->ost->unavailable = 1;
4062 
4063  return 0;
4064 }
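 /* The heuristic above picks as best_ist the input feeding the buffersrc with
  * the most failed frame requests, i.e. the input the filter graph is currently
  * most starved for. */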
4065 
4066 /**
4067  * Run a single step of transcoding.
4068  *
4069  * @return 0 for success, <0 for error
4070  */
4071 static int transcode_step(void)
4072 {
4073  OutputStream *ost;
4074  InputStream *ist;
4075  int ret;
4076 
4077  ost = choose_output();
4078  if (!ost) {
4079  if (got_eagain()) {
4080  reset_eagain();
4081  av_usleep(10000);
4082  return 0;
4083  }
4084  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4085  return AVERROR_EOF;
4086  }
4087 
4088  if (ost->filter) {
4089  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4090  return ret;
4091  if (!ist)
4092  return 0;
4093  } else {
4094  av_assert0(ost->source_index >= 0);
4095  ist = input_streams[ost->source_index];
4096  }
4097 
4098  ret = process_input(ist->file_index);
4099  if (ret == AVERROR(EAGAIN)) {
4100  if (input_files[ist->file_index]->eagain)
4101  ost->unavailable = 1;
4102  return 0;
4103  }
4104 
4105  if (ret < 0)
4106  return ret == AVERROR_EOF ? 0 : ret;
4107 
4108  return reap_filters(0);
4109 }
4110 
4111 /*
4112  * The following code is the main loop of the file converter
4113  */
4114 static int transcode(void)
4115 {
4116  int ret, i;
4117  AVFormatContext *os;
4118  OutputStream *ost;
4119  InputStream *ist;
4120  int64_t timer_start;
4121  int64_t total_packets_written = 0;
4122 
4123  ret = transcode_init();
4124  if (ret < 0)
4125  goto fail;
4126 
4127  if (stdin_interaction) {
4128  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4129  }
4130 
4131  timer_start = av_gettime_relative();
4132 
4133 #if HAVE_PTHREADS
4134  if ((ret = init_input_threads()) < 0)
4135  goto fail;
4136 #endif
4137 
4138  while (!received_sigterm) {
4139  int64_t cur_time= av_gettime_relative();
4140 
4141  /* if 'q' was pressed, exit */
4142  if (stdin_interaction)
4143  if (check_keyboard_interaction(cur_time) < 0)
4144  break;
4145 
4146  /* check if there's any stream where output is still needed */
4147  if (!need_output()) {
4148  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4149  break;
4150  }
4151 
4152  ret = transcode_step();
4153  if (ret < 0 && ret != AVERROR_EOF) {
4154  char errbuf[128];
4155  av_strerror(ret, errbuf, sizeof(errbuf));
4156 
4157  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4158  break;
4159  }
4160 
4161  /* dump report by using the output first video and audio streams */
4162  print_report(0, timer_start, cur_time);
4163  }
4164 #if HAVE_PTHREADS
4165  free_input_threads();
4166 #endif
4167 
4168  /* at the end of stream, we must flush the decoder buffers */
4169  for (i = 0; i < nb_input_streams; i++) {
4170  ist = input_streams[i];
4171  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4172  process_input_packet(ist, NULL, 0);
4173  }
4174  }
4175  flush_encoders();
4176 
4177  term_exit();
4178 
4179  /* write the trailer if needed and close file */
4180  for (i = 0; i < nb_output_files; i++) {
4181  os = output_files[i]->ctx;
4182  if ((ret = av_write_trailer(os)) < 0) {
4183  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4184  if (exit_on_error)
4185  exit_program(1);
4186  }
4187  }
4188 
4189  /* dump report by using the first video and audio streams */
4190  print_report(1, timer_start, av_gettime_relative());
4191 
4192  /* close each encoder */
4193  for (i = 0; i < nb_output_streams; i++) {
4194  ost = output_streams[i];
4195  if (ost->encoding_needed) {
4196  av_freep(&ost->enc_ctx->stats_in);
4197  }
4198  total_packets_written += ost->packets_written;
4199  }
4200 
4201  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4202  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4203  exit_program(1);
4204  }
4205 
4206  /* close each decoder */
4207  for (i = 0; i < nb_input_streams; i++) {
4208  ist = input_streams[i];
4209  if (ist->decoding_needed) {
4210  avcodec_close(ist->dec_ctx);
4211  if (ist->hwaccel_uninit)
4212  ist->hwaccel_uninit(ist->dec_ctx);
4213  }
4214  }
4215 
4216  /* finished ! */
4217  ret = 0;
4218 
4219  fail:
4220 #if HAVE_PTHREADS
4221  free_input_threads();
4222 #endif
4223 
4224  if (output_streams) {
4225  for (i = 0; i < nb_output_streams; i++) {
4226  ost = output_streams[i];
4227  if (ost) {
4228  if (ost->logfile) {
4229  if (fclose(ost->logfile))
4231  "Error closing logfile, loss of information possible: %s\n",
4232  av_err2str(AVERROR(errno)));
4233  ost->logfile = NULL;
4234  }
4235  av_freep(&ost->forced_kf_pts);
4236  av_freep(&ost->apad);
4237  av_freep(&ost->disposition);
4238  av_dict_free(&ost->encoder_opts);
4239  av_dict_free(&ost->sws_dict);
4240  av_dict_free(&ost->swr_opts);
4241  av_dict_free(&ost->resample_opts);
4242  }
4243  }
4244  }
4245  return ret;
4246 }
4247 
4248 
4249 static int64_t getutime(void)
4250 {
4251 #if HAVE_GETRUSAGE
4252  struct rusage rusage;
4253 
4254  getrusage(RUSAGE_SELF, &rusage);
4255  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4256 #elif HAVE_GETPROCESSTIMES
4257  HANDLE proc;
4258  FILETIME c, e, k, u;
4259  proc = GetCurrentProcess();
4260  GetProcessTimes(proc, &c, &e, &k, &u);
4261  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4262 #else
4263  return av_gettime_relative();
4264 #endif
4265 }
4266 
4267 static int64_t getmaxrss(void)
4268 {
4269 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4270  struct rusage rusage;
4271  getrusage(RUSAGE_SELF, &rusage);
4272  return (int64_t)rusage.ru_maxrss * 1024;
4273 #elif HAVE_GETPROCESSMEMORYINFO
4274  HANDLE proc;
4275  PROCESS_MEMORY_COUNTERS memcounters;
4276  proc = GetCurrentProcess();
4277  memcounters.cb = sizeof(memcounters);
4278  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4279  return memcounters.PeakPagefileUsage;
4280 #else
4281  return 0;
4282 #endif
4283 }
4284 
4285 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4286 {
4287 }
4288 
4289 int main(int argc, char **argv)
4290 {
4291  int ret;
4292  int64_t ti;
4293 
4294  register_exit(ffmpeg_cleanup);
4295 
4296  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4297 
4298  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4299  parse_loglevel(argc, argv, options);
4300 
4301  if(argc>1 && !strcmp(argv[1], "-d")){
4302  run_as_daemon=1;
4303  av_log_set_callback(log_callback_null);
4304  argc--;
4305  argv++;
4306  }
4307 
4308  avcodec_register_all();
4309 #if CONFIG_AVDEVICE
4310  avdevice_register_all();
4311 #endif
4312  avfilter_register_all();
4313  av_register_all();
4314  avformat_network_init();
4315 
4316  show_banner(argc, argv, options);
4317 
4318  term_init();
4319 
4320  /* parse options and open all input/output files */
4321  ret = ffmpeg_parse_options(argc, argv);
4322  if (ret < 0)
4323  exit_program(1);
4324 
4325  if (nb_output_files <= 0 && nb_input_files == 0) {
4326  show_usage();
4327  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4328  exit_program(1);
4329  }
4330 
4331  /* file converter / grab */
4332  if (nb_output_files <= 0) {
4333  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4334  exit_program(1);
4335  }
4336 
4337 // if (nb_input_files == 0) {
4338 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4339 // exit_program(1);
4340 // }
4341 
4342  current_time = ti = getutime();
4343  if (transcode() < 0)
4344  exit_program(1);
4345  ti = getutime() - ti;
4346  if (do_benchmark) {
4347  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4348  }
4349  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4350  decode_error_stat[0], decode_error_stat[1]);
4351  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4352  exit_program(69);
4353 
4354  exit_program(received_nb_signals ? 255 : main_return_code);
4355  return main_return_code;
4356 }