ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int current_time;
134 
136 
141 
146 
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
165 
166 static int sub2video_get_blank_frame(InputStream *ist)
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
172  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194  r->x, r->y, r->w, r->h, w, h
195  );
196  return;
197  }
198 
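 /* The subtitle rectangle is PAL8: each byte in r->pict.data[0] indexes the
  * 32-bit palette in r->pict.data[1], so the loop below expands every pixel
  * to a full 32-bit value on the output canvas. */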
199  dst += r->y * dst_linesize + r->x * 4;
200  src = r->pict.data[0];
201  pal = (uint32_t *)r->pict.data[1];
202  for (y = 0; y < r->h; y++) {
203  dst2 = (uint32_t *)dst;
204  src2 = src;
205  for (x = 0; x < r->w; x++)
206  *(dst2++) = pal[*(src2++)];
207  dst += dst_linesize;
208  src += r->pict.linesize[0];
209  }
210 }
211 
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214  AVFrame *frame = ist->sub2video.frame;
215  int i;
216 
217  av_assert1(frame->data[0]);
218  ist->sub2video.last_pts = frame->pts = pts;
219  for (i = 0; i < ist->nb_filters; i++)
220  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221  AV_BUFFERSRC_FLAG_KEEP_REF |
222  AV_BUFFERSRC_FLAG_PUSH);
223 }
224 
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227  AVFrame *frame = ist->sub2video.frame;
228  int8_t *dst;
229  int dst_linesize;
230  int num_rects, i;
231  int64_t pts, end_pts;
232 
233  if (!frame)
234  return;
235  if (sub) {
236  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  num_rects = sub->num_rects;
241  } else {
242  pts = ist->sub2video.end_pts;
243  end_pts = INT64_MAX;
244  num_rects = 0;
245  }
246  if (sub2video_get_blank_frame(ist) < 0) {
248  "Impossible to get a blank canvas.\n");
249  return;
250  }
251  dst = frame->data [0];
252  dst_linesize = frame->linesize[0];
253  for (i = 0; i < num_rects; i++)
254  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255  sub2video_push_ref(ist, pts);
256  ist->sub2video.end_pts = end_pts;
257 }
258 
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261  InputFile *infile = input_files[ist->file_index];
262  int i, j, nb_reqs;
263  int64_t pts2;
264 
265  /* When a frame is read from a file, examine all sub2video streams in
266  the same file and send the sub2video frame again. Otherwise, decoded
267  video frames could be accumulating in the filter graph while a filter
268  (possibly overlay) is desperately waiting for a subtitle frame. */
269  for (i = 0; i < infile->nb_streams; i++) {
270  InputStream *ist2 = input_streams[infile->ist_index + i];
271  if (!ist2->sub2video.frame)
272  continue;
273  /* subtitles seem to be usually muxed ahead of other streams;
274  if not, subtracting a larger time here is necessary */
275  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276  /* do not send the heartbeat frame if the subtitle is already ahead */
277  if (pts2 <= ist2->sub2video.last_pts)
278  continue;
279  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280  sub2video_update(ist2, NULL);
281  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283  if (nb_reqs)
284  sub2video_push_ref(ist2, pts2);
285  }
286 }
287 
288 static void sub2video_flush(InputStream *ist)
289 {
290  int i;
291 
292  if (ist->sub2video.end_pts < INT64_MAX)
293  sub2video_update(ist, NULL);
294  for (i = 0; i < ist->nb_filters; i++)
295  av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
296 }
297 
298 /* end of sub2video hack */
299 
300 static void term_exit_sigsafe(void)
301 {
302 #if HAVE_TERMIOS_H
303  if(restore_tty)
304  tcsetattr (0, TCSANOW, &oldtty);
305 #endif
306 }
307 
308 void term_exit(void)
309 {
310  av_log(NULL, AV_LOG_QUIET, "%s", "");
311  term_exit_sigsafe();
312 }
313 
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
319 
320 static void
321 sigterm_handler(int sig)
322 {
323  received_sigterm = sig;
324  received_nb_signals++;
325  term_exit_sigsafe();
326  if(received_nb_signals > 3) {
327  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328  strlen("Received > 3 system signals, hard exiting\n"));
329 
330  exit(123);
331  }
332 }
333 
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338 
339  switch (fdwCtrlType)
340  {
341  case CTRL_C_EVENT:
342  case CTRL_BREAK_EVENT:
343  sigterm_handler(SIGINT);
344  return TRUE;
345 
346  case CTRL_CLOSE_EVENT:
347  case CTRL_LOGOFF_EVENT:
348  case CTRL_SHUTDOWN_EVENT:
349  sigterm_handler(SIGTERM);
350  /* Basically, with these 3 events, when we return from this method the
351  process is hard terminated, so stall for as long as we need to
352  in order to let the main thread(s) clean up and gracefully terminate
353  (we have at most 5 seconds, but should be done far before that). */
354  while (!ffmpeg_exited) {
355  Sleep(0);
356  }
357  return TRUE;
358 
359  default:
360  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361  return FALSE;
362  }
363 }
364 #endif
365 
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369  if(!run_as_daemon){
370  struct termios tty;
371  if (tcgetattr (0, &tty) == 0) {
372  oldtty = tty;
373  restore_tty = 1;
374 
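 /* Put the terminal into a raw-like mode: no echo, no canonical line
  * buffering, no CR/NL translation on input, so read_key() sees each
  * keypress immediately. The original settings are restored on exit. */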
375  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376  |INLCR|IGNCR|ICRNL|IXON);
377  tty.c_oflag |= OPOST;
378  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379  tty.c_cflag &= ~(CSIZE|PARENB);
380  tty.c_cflag |= CS8;
381  tty.c_cc[VMIN] = 1;
382  tty.c_cc[VTIME] = 0;
383 
384  tcsetattr (0, TCSANOW, &tty);
385  }
386  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387  }
388 #endif
389 
390  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393  signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399 
400 /* read a key without blocking */
401 static int read_key(void)
402 {
403  unsigned char ch;
404 #if HAVE_TERMIOS_H
405  int n = 1;
406  struct timeval tv;
407  fd_set rfds;
408 
409  FD_ZERO(&rfds);
410  FD_SET(0, &rfds);
411  tv.tv_sec = 0;
412  tv.tv_usec = 0;
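 /* A zero timeout makes select() return immediately, so this is a
  * non-blocking poll of stdin. */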
413  n = select(1, &rfds, NULL, NULL, &tv);
414  if (n > 0) {
415  n = read(0, &ch, 1);
416  if (n == 1)
417  return ch;
418 
419  return n;
420  }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423  static int is_pipe;
424  static HANDLE input_handle;
425  DWORD dw, nchars;
426  if(!input_handle){
427  input_handle = GetStdHandle(STD_INPUT_HANDLE);
428  is_pipe = !GetConsoleMode(input_handle, &dw);
429  }
430 
431  if (is_pipe) {
432  /* When running under a GUI, you will end up here. */
433  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434  // input pipe may have been closed by the program that ran ffmpeg
435  return -1;
436  }
437  //Read it
438  if(nchars != 0) {
439  read(0, &ch, 1);
440  return ch;
441  }else{
442  return -1;
443  }
444  }
445 # endif
446  if(kbhit())
447  return(getch());
448 #endif
449  return -1;
450 }
451 
452 static int decode_interrupt_cb(void *ctx)
453 {
454  return received_nb_signals > transcode_init_done;
455 }
456 
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
458 
459 static void ffmpeg_cleanup(int ret)
460 {
461  int i, j;
462 
463  if (do_benchmark) {
464  int maxrss = getmaxrss() / 1024;
465  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466  }
467 
468  for (i = 0; i < nb_filtergraphs; i++) {
469  FilterGraph *fg = filtergraphs[i];
470  avfilter_graph_free(&fg->graph);
471  for (j = 0; j < fg->nb_inputs; j++) {
472  av_freep(&fg->inputs[j]->name);
473  av_freep(&fg->inputs[j]);
474  }
475  av_freep(&fg->inputs);
476  for (j = 0; j < fg->nb_outputs; j++) {
477  av_freep(&fg->outputs[j]->name);
478  av_freep(&fg->outputs[j]);
479  }
480  av_freep(&fg->outputs);
481  av_freep(&fg->graph_desc);
482 
483  av_freep(&filtergraphs[i]);
484  }
485  av_freep(&filtergraphs);
486 
488 
489  /* close files */
490  for (i = 0; i < nb_output_files; i++) {
491  OutputFile *of = output_files[i];
492  AVFormatContext *s;
493  if (!of)
494  continue;
495  s = of->ctx;
496  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497  avio_closep(&s->pb);
498  avformat_free_context(s);
499  av_dict_free(&of->opts);
500 
501  av_freep(&output_files[i]);
502  }
503  for (i = 0; i < nb_output_streams; i++) {
504  OutputStream *ost = output_streams[i];
505  AVBitStreamFilterContext *bsfc;
506 
507  if (!ost)
508  continue;
509 
510  bsfc = ost->bitstream_filters;
511  while (bsfc) {
512  AVBitStreamFilterContext *next = bsfc->next;
513  av_bitstream_filter_close(bsfc);
514  bsfc = next;
515  }
516  ost->bitstream_filters = NULL;
517  av_frame_free(&ost->filtered_frame);
518  av_frame_free(&ost->last_frame);
519 
520  av_parser_close(ost->parser);
521 
522  av_freep(&ost->forced_keyframes);
523  av_expr_free(ost->forced_keyframes_pexpr);
524  av_freep(&ost->avfilter);
525  av_freep(&ost->logfile_prefix);
526 
527  av_freep(&ost->audio_channels_map);
528  ost->audio_channels_mapped = 0;
529 
530  av_dict_free(&ost->sws_dict);
531 
533 
534  av_freep(&output_streams[i]);
535  }
536 #if HAVE_PTHREADS
537  free_input_threads();
538 #endif
539  for (i = 0; i < nb_input_files; i++) {
540  avformat_close_input(&input_files[i]->ctx);
541  av_freep(&input_files[i]);
542  }
543  for (i = 0; i < nb_input_streams; i++) {
544  InputStream *ist = input_streams[i];
545 
548  av_dict_free(&ist->decoder_opts);
551  av_freep(&ist->filters);
552  av_freep(&ist->hwaccel_device);
553 
555 
556  av_freep(&input_streams[i]);
557  }
558 
559  if (vstats_file) {
560  if (fclose(vstats_file))
562  "Error closing vstats file, loss of information possible: %s\n",
563  av_err2str(AVERROR(errno)));
564  }
566 
567  av_freep(&input_streams);
568  av_freep(&input_files);
569  av_freep(&output_streams);
570  av_freep(&output_files);
571 
572  uninit_opts();
573 
575 
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && transcode_init_done) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
583  ffmpeg_exited = 1;
584 }
585 
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
587 {
588  AVDictionaryEntry *t = NULL;
589 
590  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
592  }
593 }
594 
595 void assert_avoptions(AVDictionary *m)
596 {
597  AVDictionaryEntry *t;
598  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600  exit_program(1);
601  }
602 }
603 
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606  exit_program(1);
607 }
608 
609 static void update_benchmark(const char *fmt, ...)
610 {
611  if (do_benchmark_all) {
612  int64_t t = getutime();
613  va_list va;
614  char buf[1024];
615 
616  if (fmt) {
617  va_start(va, fmt);
618  vsnprintf(buf, sizeof(buf), fmt, va);
619  va_end(va);
620  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621  }
622  current_time = t;
623  }
624 }
625 
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628  int i;
629  for (i = 0; i < nb_output_streams; i++) {
630  OutputStream *ost2 = output_streams[i];
631  ost2->finished |= ost == ost2 ? this_stream : others;
632  }
633 }
634 
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
636 {
637  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
639  int ret;
640 
641  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643  if (ost->st->codec->extradata) {
644  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
646  }
647  }
648 
651  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652 
653  /*
654  * Audio encoders may split the packets -- #frames in != #packets out.
655  * But there is no reordering, so we can limit the number of output packets
656  * by simply dropping them here.
657  * Counting encoded video frames needs to be done separately because of
658  * reordering, see do_video_out()
659  */
660  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661  if (ost->frame_number >= ost->max_frames) {
662  av_packet_unref(pkt);
663  return;
664  }
665  ost->frame_number++;
666  }
667  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668  int i;
669  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
670  NULL);
671  ost->quality = sd ? AV_RL32(sd) : -1;
672  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
673 
674  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
675  if (sd && i < sd[5])
676  ost->error[i] = AV_RL64(sd + 8 + 8*i);
677  else
678  ost->error[i] = -1;
679  }
680 
681  if (ost->frame_rate.num && ost->is_cfr) {
682  if (pkt->duration > 0)
683  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
685  ost->st->time_base);
686  }
687  }
688 
689  if (bsfc)
690  av_packet_split_side_data(pkt);
691 
692  if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693  print_error("", ret);
694  if (exit_on_error)
695  exit_program(1);
696  }
697 
698  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
699  if (pkt->dts != AV_NOPTS_VALUE &&
700  pkt->pts != AV_NOPTS_VALUE &&
701  pkt->dts > pkt->pts) {
702  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
703  pkt->dts, pkt->pts,
704  ost->file_index, ost->st->index);
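 /* pts + dts + (last_mux_dts + 1), minus the minimum and the maximum of the
  * three, leaves their median, which is used as the replacement timestamp. */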
705  pkt->pts =
706  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
707  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
708  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
709  }
710  if(
711  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
712  pkt->dts != AV_NOPTS_VALUE &&
713  ost->last_mux_dts != AV_NOPTS_VALUE) {
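 /* Muxers without AVFMT_TS_NONSTRICT need strictly increasing DTS, so the
  * smallest acceptable value is last_mux_dts + 1; non-strict muxers also
  * accept a DTS equal to the previous one. */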
714  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
715  if (pkt->dts < max) {
716  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
717  av_log(s, loglevel, "Non-monotonous DTS in output stream "
718  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
719  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
720  if (exit_on_error) {
721  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
722  exit_program(1);
723  }
724  av_log(s, loglevel, "changing to %"PRId64". This may result "
725  "in incorrect timestamps in the output file.\n",
726  max);
727  if(pkt->pts >= pkt->dts)
728  pkt->pts = FFMAX(pkt->pts, max);
729  pkt->dts = max;
730  }
731  }
732  }
733  ost->last_mux_dts = pkt->dts;
734 
735  ost->data_size += pkt->size;
736  ost->packets_written++;
737 
738  pkt->stream_index = ost->index;
739 
740  if (debug_ts) {
741  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
742  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
744  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
745  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
746  pkt->size
747  );
748  }
749 
750  ret = av_interleaved_write_frame(s, pkt);
751  if (ret < 0) {
752  print_error("av_interleaved_write_frame()", ret);
753  main_return_code = 1;
754  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
755  }
756  av_packet_unref(pkt);
757 }
758 
759 static void close_output_stream(OutputStream *ost)
760 {
761  OutputFile *of = output_files[ost->file_index];
762 
763  ost->finished |= ENCODER_FINISHED;
764  if (of->shortest) {
765  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
766  of->recording_time = FFMIN(of->recording_time, end);
767  }
768 }
769 
770 static int check_recording_time(OutputStream *ost)
771 {
772  OutputFile *of = output_files[ost->file_index];
773 
774  if (of->recording_time != INT64_MAX &&
775  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
776  AV_TIME_BASE_Q) >= 0) {
777  close_output_stream(ost);
778  return 0;
779  }
780  return 1;
781 }
782 
784  AVFrame *frame)
785 {
786  AVCodecContext *enc = ost->enc_ctx;
787  AVPacket pkt;
788  int got_packet = 0;
789 
790  av_init_packet(&pkt);
791  pkt.data = NULL;
792  pkt.size = 0;
793 
794  if (!check_recording_time(ost))
795  return;
796 
797  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
798  frame->pts = ost->sync_opts;
799  ost->sync_opts = frame->pts + frame->nb_samples;
800  ost->samples_encoded += frame->nb_samples;
801  ost->frames_encoded++;
802 
803  av_assert0(pkt.size || !pkt.data);
805  if (debug_ts) {
806  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
807  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
808  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
809  enc->time_base.num, enc->time_base.den);
810  }
811 
812  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
813  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
814  exit_program(1);
815  }
816  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
817 
818  if (got_packet) {
819  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
820 
821  if (debug_ts) {
822  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
823  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
824  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
825  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
826  }
827 
828  write_frame(s, &pkt, ost);
829  }
830 }
831 
832 static void do_subtitle_out(AVFormatContext *s,
833  OutputStream *ost,
834  InputStream *ist,
835  AVSubtitle *sub)
836 {
837  int subtitle_out_max_size = 1024 * 1024;
838  int subtitle_out_size, nb, i;
839  AVCodecContext *enc;
840  AVPacket pkt;
841  int64_t pts;
842 
843  if (sub->pts == AV_NOPTS_VALUE) {
844  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
845  if (exit_on_error)
846  exit_program(1);
847  return;
848  }
849 
850  enc = ost->enc_ctx;
851 
852  if (!subtitle_out) {
853  subtitle_out = av_malloc(subtitle_out_max_size);
854  if (!subtitle_out) {
855  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
856  exit_program(1);
857  }
858  }
859 
860  /* Note: DVB subtitles need one packet to draw them and another
861  packet to clear them */
862  /* XXX: signal it in the codec context ? */
863  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
864  nb = 2;
865  else
866  nb = 1;
867 
868  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
869  pts = sub->pts;
870  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
871  pts -= output_files[ost->file_index]->start_time;
872  for (i = 0; i < nb; i++) {
873  unsigned save_num_rects = sub->num_rects;
874 
875  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
876  if (!check_recording_time(ost))
877  return;
878 
879  sub->pts = pts;
880  // start_display_time is required to be 0
881  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
882  sub->end_display_time -= sub->start_display_time;
883  sub->start_display_time = 0;
884  if (i == 1)
885  sub->num_rects = 0;
886 
887  ost->frames_encoded++;
888 
889  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
890  subtitle_out_max_size, sub);
891  if (i == 1)
892  sub->num_rects = save_num_rects;
893  if (subtitle_out_size < 0) {
894  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
895  exit_program(1);
896  }
897 
898  av_init_packet(&pkt);
899  pkt.data = subtitle_out;
900  pkt.size = subtitle_out_size;
901  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
902  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
903  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
904  /* XXX: the pts correction is handled here. Maybe handling
905  it in the codec would be better */
906  if (i == 0)
907  pkt.pts += 90 * sub->start_display_time;
908  else
909  pkt.pts += 90 * sub->end_display_time;
910  }
911  pkt.dts = pkt.pts;
912  write_frame(s, &pkt, ost);
913  }
914 }
915 
917  OutputStream *ost,
918  AVFrame *next_picture,
919  double sync_ipts)
920 {
921  int ret, format_video_sync;
922  AVPacket pkt;
923  AVCodecContext *enc = ost->enc_ctx;
924  AVCodecContext *mux_enc = ost->st->codec;
925  int nb_frames, nb0_frames, i;
926  double delta, delta0;
927  double duration = 0;
928  int frame_size = 0;
929  InputStream *ist = NULL;
930  AVFilterContext *filter = ost->filter->filter;
931 
932  if (ost->source_index >= 0)
933  ist = input_streams[ost->source_index];
934 
935  if (filter->inputs[0]->frame_rate.num > 0 &&
936  filter->inputs[0]->frame_rate.den > 0)
937  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
938 
939  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
940  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
941 
942  if (!ost->filters_script &&
943  !ost->filters &&
944  next_picture &&
945  ist &&
946  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
947  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
948  }
949 
950  if (!next_picture) {
951  //end, flushing
952  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
953  ost->last_nb0_frames[1],
954  ost->last_nb0_frames[2]);
955  } else {
956  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
957  delta = delta0 + duration;
958 
959  /* by default, we output a single frame */
960  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
961  nb_frames = 1;
962 
963  format_video_sync = video_sync_method;
964  if (format_video_sync == VSYNC_AUTO) {
965  if(!strcmp(s->oformat->name, "avi")) {
966  format_video_sync = VSYNC_VFR;
967  } else
968  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
969  if ( ist
970  && format_video_sync == VSYNC_CFR
971  && input_files[ist->file_index]->ctx->nb_streams == 1
972  && input_files[ist->file_index]->input_ts_offset == 0) {
973  format_video_sync = VSYNC_VSCFR;
974  }
975  if (format_video_sync == VSYNC_CFR && copy_ts) {
976  format_video_sync = VSYNC_VSCFR;
977  }
978  }
979  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
980 
981  if (delta0 < 0 &&
982  delta > 0 &&
983  format_video_sync != VSYNC_PASSTHROUGH &&
984  format_video_sync != VSYNC_DROP) {
985  if (delta0 < -0.6) {
986  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
987  } else
988  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
989  sync_ipts = ost->sync_opts;
990  duration += delta0;
991  delta0 = 0;
992  }
993 
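 /* Decide how many output frames to emit for this input picture:
  * nb_frames == 0 drops it, nb_frames > 1 duplicates it (CFR padding),
  * while the VFR/PASSTHROUGH/DROP cases mostly just resynchronize
  * sync_opts to the input timestamp. */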
994  switch (format_video_sync) {
995  case VSYNC_VSCFR:
996  if (ost->frame_number == 0 && delta0 >= 0.5) {
997  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
998  delta = duration;
999  delta0 = 0;
1000  ost->sync_opts = lrint(sync_ipts);
1001  }
1002  case VSYNC_CFR:
1003  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1004  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1005  nb_frames = 0;
1006  } else if (delta < -1.1)
1007  nb_frames = 0;
1008  else if (delta > 1.1) {
1009  nb_frames = lrintf(delta);
1010  if (delta0 > 1.1)
1011  nb0_frames = lrintf(delta0 - 0.6);
1012  }
1013  break;
1014  case VSYNC_VFR:
1015  if (delta <= -0.6)
1016  nb_frames = 0;
1017  else if (delta > 0.6)
1018  ost->sync_opts = lrint(sync_ipts);
1019  break;
1020  case VSYNC_DROP:
1021  case VSYNC_PASSTHROUGH:
1022  ost->sync_opts = lrint(sync_ipts);
1023  break;
1024  default:
1025  av_assert0(0);
1026  }
1027  }
1028 
1029  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1030  nb0_frames = FFMIN(nb0_frames, nb_frames);
1031 
1032  memmove(ost->last_nb0_frames + 1,
1033  ost->last_nb0_frames,
1034  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1035  ost->last_nb0_frames[0] = nb0_frames;
1036 
1037  if (nb0_frames == 0 && ost->last_dropped) {
1038  nb_frames_drop++;
1040  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1041  ost->frame_number, ost->st->index, ost->last_frame->pts);
1042  }
1043  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1044  if (nb_frames > dts_error_threshold * 30) {
1045  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1046  nb_frames_drop++;
1047  return;
1048  }
1049  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1050  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1051  }
1052  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1053 
1054  /* duplicates frame if needed */
1055  for (i = 0; i < nb_frames; i++) {
1056  AVFrame *in_picture;
1057  av_init_packet(&pkt);
1058  pkt.data = NULL;
1059  pkt.size = 0;
1060 
1061  if (i < nb0_frames && ost->last_frame) {
1062  in_picture = ost->last_frame;
1063  } else
1064  in_picture = next_picture;
1065 
1066  if (!in_picture)
1067  return;
1068 
1069  in_picture->pts = ost->sync_opts;
1070 
1071 #if 1
1072  if (!check_recording_time(ost))
1073 #else
1074  if (ost->frame_number >= ost->max_frames)
1075 #endif
1076  return;
1077 
1078 #if FF_API_LAVF_FMT_RAWPICTURE
1079  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1080  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1081  /* raw pictures are written as an AVPicture structure to
1082  avoid any copies. We temporarily support the older
1083  method. */
1084  if (in_picture->interlaced_frame)
1085  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1086  else
1087  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1088  pkt.data = (uint8_t *)in_picture;
1089  pkt.size = sizeof(AVPicture);
1090  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1091  pkt.flags |= AV_PKT_FLAG_KEY;
1092 
1093  write_frame(s, &pkt, ost);
1094  } else
1095 #endif
1096  {
1097  int got_packet, forced_keyframe = 0;
1098  double pts_time;
1099 
1100  if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1101  ost->top_field_first >= 0)
1102  in_picture->top_field_first = !!ost->top_field_first;
1103 
1104  if (in_picture->interlaced_frame) {
1105  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1106  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1107  else
1108  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1109  } else
1110  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1111 
1112  in_picture->quality = enc->global_quality;
1113  in_picture->pict_type = 0;
1114 
1115  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1116  in_picture->pts * av_q2d(enc->time_base) : NAN;
1117  if (ost->forced_kf_index < ost->forced_kf_count &&
1118  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1119  ost->forced_kf_index++;
1120  forced_keyframe = 1;
1121  } else if (ost->forced_keyframes_pexpr) {
1122  double res;
1123  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1126  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1132  res);
1133  if (res) {
1134  forced_keyframe = 1;
1140  }
1141 
1143  } else if ( ost->forced_keyframes
1144  && !strncmp(ost->forced_keyframes, "source", 6)
1145  && in_picture->key_frame==1) {
1146  forced_keyframe = 1;
1147  }
1148 
1149  if (forced_keyframe) {
1150  in_picture->pict_type = AV_PICTURE_TYPE_I;
1151  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1152  }
1153 
1155  if (debug_ts) {
1156  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1157  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1158  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1159  enc->time_base.num, enc->time_base.den);
1160  }
1161 
1162  ost->frames_encoded++;
1163 
1164  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1165  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1166  if (ret < 0) {
1167  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1168  exit_program(1);
1169  }
1170 
1171  if (got_packet) {
1172  if (debug_ts) {
1173  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1174  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1175  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1176  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1177  }
1178 
1179  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1180  pkt.pts = ost->sync_opts;
1181 
1182  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1183 
1184  if (debug_ts) {
1185  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1188  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1189  }
1190 
1191  frame_size = pkt.size;
1192  write_frame(s, &pkt, ost);
1193 
1194  /* if two pass, output log */
1195  if (ost->logfile && enc->stats_out) {
1196  fprintf(ost->logfile, "%s", enc->stats_out);
1197  }
1198  }
1199  }
1200  ost->sync_opts++;
1201  /*
1202  * For video, number of frames in == number of packets out.
1203  * But there may be reordering, so we can't throw away frames on encoder
1204  * flush, we need to limit them here, before they go into encoder.
1205  */
1206  ost->frame_number++;
1207 
1208  if (vstats_filename && frame_size)
1209  do_video_stats(ost, frame_size);
1210  }
1211 
1212  if (!ost->last_frame)
1213  ost->last_frame = av_frame_alloc();
1214  av_frame_unref(ost->last_frame);
1215  if (next_picture && ost->last_frame)
1216  av_frame_ref(ost->last_frame, next_picture);
1217  else
1218  av_frame_free(&ost->last_frame);
1219 }
1220 
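 /* Callers pass the mean squared error already normalized by
  * width * height * 255^2, so PSNR = -10 * log10(MSE / MAX^2)
  * reduces to -10 * log10(d). */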
1221 static double psnr(double d)
1222 {
1223  return -10.0 * log10(d);
1224 }
1225 
1226 static void do_video_stats(OutputStream *ost, int frame_size)
1227 {
1228  AVCodecContext *enc;
1229  int frame_number;
1230  double ti1, bitrate, avg_bitrate;
1231 
1232  /* this is executed just the first time do_video_stats is called */
1233  if (!vstats_file) {
1234  vstats_file = fopen(vstats_filename, "w");
1235  if (!vstats_file) {
1236  perror("fopen");
1237  exit_program(1);
1238  }
1239  }
1240 
1241  enc = ost->enc_ctx;
1242  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1243  frame_number = ost->st->nb_frames;
1244  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1245  ost->quality / (float)FF_QP2LAMBDA);
1246 
1247  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1248  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1249 
1250  fprintf(vstats_file,"f_size= %6d ", frame_size);
1251  /* compute pts value */
1252  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1253  if (ti1 < 0.01)
1254  ti1 = 0.01;
1255 
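 /* bitrate approximates the instantaneous rate: this frame's bits over one
  * time-base tick (roughly one frame duration); avg_bitrate divides all
  * bytes written so far by the stream time ti1. Both are in kbit/s. */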
1256  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1257  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1258  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1259  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1260  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1261  }
1262 }
1263 
1264 static void finish_output_stream(OutputStream *ost)
1265 {
1266  OutputFile *of = output_files[ost->file_index];
1267  int i;
1268 
1269  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1270 
1271  if (of->shortest) {
1272  for (i = 0; i < of->ctx->nb_streams; i++)
1273  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1274  }
1275 }
1276 
1277 /**
1278  * Get and encode new output from any of the filtergraphs, without causing
1279  * activity.
1280  *
1281  * @return 0 for success, <0 for severe errors
1282  */
1283 static int reap_filters(int flush)
1284 {
1285  AVFrame *filtered_frame = NULL;
1286  int i;
1287 
1288  /* Reap all buffers present in the buffer sinks */
1289  for (i = 0; i < nb_output_streams; i++) {
1290  OutputStream *ost = output_streams[i];
1291  OutputFile *of = output_files[ost->file_index];
1292  AVFilterContext *filter;
1293  AVCodecContext *enc = ost->enc_ctx;
1294  int ret = 0;
1295 
1296  if (!ost->filter)
1297  continue;
1298  filter = ost->filter->filter;
1299 
1300  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1301  return AVERROR(ENOMEM);
1302  }
1303  filtered_frame = ost->filtered_frame;
1304 
1305  while (1) {
1306  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1307  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1308  AV_BUFFERSINK_FLAG_NO_REQUEST);
1309  if (ret < 0) {
1310  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1312  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1313  } else if (flush && ret == AVERROR_EOF) {
1314  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1315  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1316  }
1317  break;
1318  }
1319  if (ost->finished) {
1320  av_frame_unref(filtered_frame);
1321  continue;
1322  }
1323  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1324  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1325  AVRational tb = enc->time_base;
1326  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1327 
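 /* Rescale into a temporarily finer time base (den << extra_bits) so that
  * float_pts keeps fractional precision that the plain integer rescale into
  * enc->time_base below would discard. */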
1328  tb.den <<= extra_bits;
1329  float_pts =
1330  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1331  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1332  float_pts /= 1 << extra_bits;
1333  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1334  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1335 
1336  filtered_frame->pts =
1337  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1338  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1339  }
1340  //if (ost->source_index >= 0)
1341  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1342 
1343  switch (filter->inputs[0]->type) {
1344  case AVMEDIA_TYPE_VIDEO:
1345  if (!ost->frame_aspect_ratio.num)
1346  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1347 
1348  if (debug_ts) {
1349  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1350  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1351  float_pts,
1352  enc->time_base.num, enc->time_base.den);
1353  }
1354 
1355  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1356  break;
1357  case AVMEDIA_TYPE_AUDIO:
1358  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1359  enc->channels != av_frame_get_channels(filtered_frame)) {
1361  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1362  break;
1363  }
1364  do_audio_out(of->ctx, ost, filtered_frame);
1365  break;
1366  default:
1367  // TODO support subtitle filters
1368  av_assert0(0);
1369  }
1370 
1371  av_frame_unref(filtered_frame);
1372  }
1373  }
1374 
1375  return 0;
1376 }
1377 
1378 static void print_final_stats(int64_t total_size)
1379 {
1380  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381  uint64_t subtitle_size = 0;
1382  uint64_t data_size = 0;
1383  float percent = -1.0;
1384  int i, j;
1385  int pass1_used = 1;
1386 
1387  for (i = 0; i < nb_output_streams; i++) {
1388  OutputStream *ost = output_streams[i];
1389  switch (ost->enc_ctx->codec_type) {
1390  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393  default: other_size += ost->data_size; break;
1394  }
1395  extra_size += ost->enc_ctx->extradata_size;
1396  data_size += ost->data_size;
1398  if (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1399  pass1_used = 0;
1400  }
1401 
1402  if (data_size && total_size>0 && total_size >= data_size)
1403  percent = 100.0 * (total_size - data_size) / data_size;
1404 
1405  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406  video_size / 1024.0,
1407  audio_size / 1024.0,
1408  subtitle_size / 1024.0,
1409  other_size / 1024.0,
1410  extra_size / 1024.0);
1411  if (percent >= 0.0)
1412  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1413  else
1414  av_log(NULL, AV_LOG_INFO, "unknown");
1415  av_log(NULL, AV_LOG_INFO, "\n");
1416 
1417  /* print verbose per-stream stats */
1418  for (i = 0; i < nb_input_files; i++) {
1419  InputFile *f = input_files[i];
1420  uint64_t total_packets = 0, total_size = 0;
1421 
1422  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423  i, f->ctx->filename);
1424 
1425  for (j = 0; j < f->nb_streams; j++) {
1426  InputStream *ist = input_streams[f->ist_index + j];
1427  enum AVMediaType type = ist->dec_ctx->codec_type;
1428 
1429  total_size += ist->data_size;
1430  total_packets += ist->nb_packets;
1431 
1432  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433  i, j, media_type_string(type));
1434  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435  ist->nb_packets, ist->data_size);
1436 
1437  if (ist->decoding_needed) {
1438  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439  ist->frames_decoded);
1440  if (type == AVMEDIA_TYPE_AUDIO)
1441  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442  av_log(NULL, AV_LOG_VERBOSE, "; ");
1443  }
1444 
1445  av_log(NULL, AV_LOG_VERBOSE, "\n");
1446  }
1447 
1448  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449  total_packets, total_size);
1450  }
1451 
1452  for (i = 0; i < nb_output_files; i++) {
1453  OutputFile *of = output_files[i];
1454  uint64_t total_packets = 0, total_size = 0;
1455 
1456  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457  i, of->ctx->filename);
1458 
1459  for (j = 0; j < of->ctx->nb_streams; j++) {
1460  OutputStream *ost = output_streams[of->ost_index + j];
1461  enum AVMediaType type = ost->enc_ctx->codec_type;
1462 
1463  total_size += ost->data_size;
1464  total_packets += ost->packets_written;
1465 
1466  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467  i, j, media_type_string(type));
1468  if (ost->encoding_needed) {
1469  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470  ost->frames_encoded);
1471  if (type == AVMEDIA_TYPE_AUDIO)
1472  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473  av_log(NULL, AV_LOG_VERBOSE, "; ");
1474  }
1475 
1476  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477  ost->packets_written, ost->data_size);
1478 
1479  av_log(NULL, AV_LOG_VERBOSE, "\n");
1480  }
1481 
1482  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483  total_packets, total_size);
1484  }
1485  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1487  if (pass1_used) {
1488  av_log(NULL, AV_LOG_WARNING, "\n");
1489  } else {
1490  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1491  }
1492  }
1493 }
1494 
1495 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1496 {
1497  char buf[1024];
1498  AVBPrint buf_script;
1499  OutputStream *ost;
1500  AVFormatContext *oc;
1501  int64_t total_size;
1502  AVCodecContext *enc;
1503  int frame_number, vid, i;
1504  double bitrate;
1505  double speed;
1506  int64_t pts = INT64_MIN + 1;
1507  static int64_t last_time = -1;
1508  static int qp_histogram[52];
1509  int hours, mins, secs, us;
1510  int ret;
1511  float t;
1512 
1513  if (!print_stats && !is_last_report && !progress_avio)
1514  return;
1515 
1516  if (!is_last_report) {
1517  if (last_time == -1) {
1518  last_time = cur_time;
1519  return;
1520  }
1521  if ((cur_time - last_time) < 500000)
1522  return;
1523  last_time = cur_time;
1524  }
1525 
1526  t = (cur_time-timer_start) / 1000000.0;
1527 
1528 
1529  oc = output_files[0]->ctx;
1530 
1531  total_size = avio_size(oc->pb);
1532  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1533  total_size = avio_tell(oc->pb);
1534 
1535  buf[0] = '\0';
1536  vid = 0;
1537  av_bprint_init(&buf_script, 0, 1);
1538  for (i = 0; i < nb_output_streams; i++) {
1539  float q = -1;
1540  ost = output_streams[i];
1541  enc = ost->enc_ctx;
1542  if (!ost->stream_copy)
1543  q = ost->quality / (float) FF_QP2LAMBDA;
1544 
1545  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1547  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1548  ost->file_index, ost->index, q);
1549  }
1550  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1551  float fps;
1552 
1553  frame_number = ost->frame_number;
1554  fps = t > 1 ? frame_number / t : 0;
1555  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1556  frame_number, fps < 9.95, fps, q);
1557  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1558  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1559  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560  ost->file_index, ost->index, q);
1561  if (is_last_report)
1562  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1563  if (qp_hist) {
1564  int j;
1565  int qp = lrintf(q);
1566  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1567  qp_histogram[qp]++;
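 /* Print the histogram as one hex digit per QP bucket, log2-compressed
  * so even large counts fit in a single character. */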
1568  for (j = 0; j < 32; j++)
1569  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1570  }
1571 
1572  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1573  int j;
1574  double error, error_sum = 0;
1575  double scale, scale_sum = 0;
1576  double p;
1577  char type[3] = { 'Y','U','V' };
1578  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1579  for (j = 0; j < 3; j++) {
1580  if (is_last_report) {
1581  error = enc->error[j];
1582  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1583  } else {
1584  error = ost->error[j];
1585  scale = enc->width * enc->height * 255.0 * 255.0;
1586  }
1587  if (j)
1588  scale /= 4;
1589  error_sum += error;
1590  scale_sum += scale;
1591  p = psnr(error / scale);
1592  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1593  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1594  ost->file_index, ost->index, type[j] | 32, p);
1595  }
1596  p = psnr(error_sum / scale_sum);
1597  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1598  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1599  ost->file_index, ost->index, p);
1600  }
1601  vid = 1;
1602  }
1603  /* compute min output value */
1604  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1605  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1606  ost->st->time_base, AV_TIME_BASE_Q));
1607  if (is_last_report)
1608  nb_frames_drop += ost->last_dropped;
1609  }
1610 
1611  secs = FFABS(pts) / AV_TIME_BASE;
1612  us = FFABS(pts) % AV_TIME_BASE;
1613  mins = secs / 60;
1614  secs %= 60;
1615  hours = mins / 60;
1616  mins %= 60;
1617 
1618  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1619  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1620 
1621  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1622  "size=N/A time=");
1623  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624  "size=%8.0fkB time=", total_size / 1024.0);
1625  if (pts < 0)
1626  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629  (100 * us) / AV_TIME_BASE);
1630 
1631  if (bitrate < 0) {
1632  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633  av_bprintf(&buf_script, "bitrate=N/A\n");
1634  }else{
1635  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1637  }
1638 
1639  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643  hours, mins, secs, us);
1644 
1645  if (nb_frames_dup || nb_frames_drop)
1646  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647  nb_frames_dup, nb_frames_drop);
1648  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1650 
1651  if (speed < 0) {
1652  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1653  av_bprintf(&buf_script, "speed=N/A\n");
1654  } else {
1655  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1656  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1657  }
1658 
1659  if (print_stats || is_last_report) {
1660  const char end = is_last_report ? '\n' : '\r';
1661  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662  fprintf(stderr, "%s %c", buf, end);
1663  } else
1664  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1665 
1666  fflush(stderr);
1667  }
1668 
1669  if (progress_avio) {
1670  av_bprintf(&buf_script, "progress=%s\n",
1671  is_last_report ? "end" : "continue");
1672  avio_write(progress_avio, buf_script.str,
1673  FFMIN(buf_script.len, buf_script.size - 1));
1674  avio_flush(progress_avio);
1675  av_bprint_finalize(&buf_script, NULL);
1676  if (is_last_report) {
1677  if ((ret = avio_closep(&progress_avio)) < 0)
1679  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1680  }
1681  }
1682 
1683  if (is_last_report)
1684  print_final_stats(total_size);
1685 }
1686 
1687 static void flush_encoders(void)
1688 {
1689  int i, ret;
1690 
1691  for (i = 0; i < nb_output_streams; i++) {
1692  OutputStream *ost = output_streams[i];
1693  AVCodecContext *enc = ost->enc_ctx;
1694  AVFormatContext *os = output_files[ost->file_index]->ctx;
1695  int stop_encoding = 0;
1696 
1697  if (!ost->encoding_needed)
1698  continue;
1699 
1700  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1701  continue;
1702 #if FF_API_LAVF_FMT_RAWPICTURE
1703  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1704  continue;
1705 #endif
1706 
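 /* Drain the encoder: keep calling it with a NULL frame until it reports
  * that no more buffered packets are available (got_packet == 0). */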
1707  for (;;) {
1708  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1709  const char *desc;
1710 
1711  switch (enc->codec_type) {
1712  case AVMEDIA_TYPE_AUDIO:
1713  encode = avcodec_encode_audio2;
1714  desc = "audio";
1715  break;
1716  case AVMEDIA_TYPE_VIDEO:
1717  encode = avcodec_encode_video2;
1718  desc = "video";
1719  break;
1720  default:
1721  stop_encoding = 1;
1722  }
1723 
1724  if (encode) {
1725  AVPacket pkt;
1726  int pkt_size;
1727  int got_packet;
1728  av_init_packet(&pkt);
1729  pkt.data = NULL;
1730  pkt.size = 0;
1731 
1733  ret = encode(enc, &pkt, NULL, &got_packet);
1734  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1735  if (ret < 0) {
1736  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1737  desc,
1738  av_err2str(ret));
1739  exit_program(1);
1740  }
1741  if (ost->logfile && enc->stats_out) {
1742  fprintf(ost->logfile, "%s", enc->stats_out);
1743  }
1744  if (!got_packet) {
1745  stop_encoding = 1;
1746  break;
1747  }
1748  if (ost->finished & MUXER_FINISHED) {
1749  av_packet_unref(&pkt);
1750  continue;
1751  }
1752  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1753  pkt_size = pkt.size;
1754  write_frame(os, &pkt, ost);
1755  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename)
1756  do_video_stats(ost, pkt_size);
1757  }
1758  }
1759 
1760  if (stop_encoding)
1761  break;
1762  }
1763  }
1764 }
1765 
1766 /*
1767  * Check whether a packet from ist should be written into ost at this time
1768  */
1769 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1770 {
1771  OutputFile *of = output_files[ost->file_index];
1772  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1773 
1774  if (ost->source_index != ist_index)
1775  return 0;
1776 
1777  if (ost->finished)
1778  return 0;
1779 
1780  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1781  return 0;
1782 
1783  return 1;
1784 }
1785 
1786 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1787 {
1788  OutputFile *of = output_files[ost->file_index];
1789  InputFile *f = input_files [ist->file_index];
1790  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1791  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1792  AVPicture pict;
1793  AVPacket opkt;
1794 
1795  av_init_packet(&opkt);
1796 
1797  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1798  !ost->copy_initial_nonkeyframes)
1799  return;
1800 
1801  if (!ost->frame_number && !ost->copy_prior_start) {
1802  int64_t comp_start = start_time;
1803  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1804  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1805  if (pkt->pts == AV_NOPTS_VALUE ?
1806  ist->pts < comp_start :
1807  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1808  return;
1809  }
1810 
1811  if (of->recording_time != INT64_MAX &&
1812  ist->pts >= of->recording_time + start_time) {
1813  close_output_stream(ost);
1814  return;
1815  }
1816 
1817  if (f->recording_time != INT64_MAX) {
1818  start_time = f->ctx->start_time;
1819  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1820  start_time += f->start_time;
1821  if (ist->pts >= f->recording_time + start_time) {
1822  close_output_stream(ost);
1823  return;
1824  }
1825  }
1826 
1827  /* force the input stream PTS */
1828  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1829  ost->sync_opts++;
1830 
1831  if (pkt->pts != AV_NOPTS_VALUE)
1832  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1833  else
1834  opkt.pts = AV_NOPTS_VALUE;
1835 
1836  if (pkt->dts == AV_NOPTS_VALUE)
1837  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1838  else
1839  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1840  opkt.dts -= ost_tb_start_time;
1841 
1842  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1843  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1844  if(!duration)
1845  duration = ist->dec_ctx->frame_size;
1846  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1847  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1848  ost->st->time_base) - ost_tb_start_time;
1849  }
1850 
1851  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1852  opkt.flags = pkt->flags;
1853  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1854  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1855  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1856  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1857  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1858  ) {
1859  int ret = av_parser_change(ost->parser, ost->st->codec,
1860  &opkt.data, &opkt.size,
1861  pkt->data, pkt->size,
1862  pkt->flags & AV_PKT_FLAG_KEY);
1863  if (ret < 0) {
1864  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1865  av_err2str(ret));
1866  exit_program(1);
1867  }
1868  if (ret) {
1869  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1870  if (!opkt.buf)
1871  exit_program(1);
1872  }
1873  } else {
1874  opkt.data = pkt->data;
1875  opkt.size = pkt->size;
1876  }
1877  av_copy_packet_side_data(&opkt, pkt);
1878 
1879 #if FF_API_LAVF_FMT_RAWPICTURE
1880  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1881  ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1882  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1883  /* store AVPicture in AVPacket, as expected by the output format */
1884  int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1885  if (ret < 0) {
1886  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1887  av_err2str(ret));
1888  exit_program(1);
1889  }
1890  opkt.data = (uint8_t *)&pict;
1891  opkt.size = sizeof(AVPicture);
1892  opkt.flags |= AV_PKT_FLAG_KEY;
1893  }
1894 #endif
1895 
1896  write_frame(of->ctx, &opkt, ost);
1897 }
1898 
1899 int guess_input_channel_layout(InputStream *ist)
1900 {
1901  AVCodecContext *dec = ist->dec_ctx;
1902 
1903  if (!dec->channel_layout) {
1904  char layout_name[256];
1905 
1906  if (dec->channels > ist->guess_layout_max)
1907  return 0;
1908  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1909  if (!dec->channel_layout)
1910  return 0;
1911  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1912  dec->channels, dec->channel_layout);
1913  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1914  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1915  }
1916  return 1;
1917 }
1918 
1919 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1920 {
1921  if (*got_output || ret<0)
1922  decode_error_stat[ret<0] ++;
1923 
1924  if (ret < 0 && exit_on_error)
1925  exit_program(1);
1926 
1927  if (exit_on_error && *got_output && ist) {
1928  if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1929  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1930  exit_program(1);
1931  }
1932  }
1933 }
1934 
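/* Decode one audio packet: run the decoder, keep per-stream statistics,
 * reconfigure the attached filtergraphs if the sample rate, sample format,
 * channel count or channel layout changed mid-stream, pick a pts for the
 * decoded frame (decoder pts, then packet pts, then the running dts) and
 * push the frame into every buffer source fed by this input stream. */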
1935 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1936 {
1937  AVFrame *decoded_frame, *f;
1938  AVCodecContext *avctx = ist->dec_ctx;
1939  int i, ret, err = 0, resample_changed;
1940  AVRational decoded_frame_tb;
1941 
1942  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1943  return AVERROR(ENOMEM);
1944  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1945  return AVERROR(ENOMEM);
1946  decoded_frame = ist->decoded_frame;
1947 
1948  update_benchmark(NULL);
1949  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1950  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1951 
1952  if (ret >= 0 && avctx->sample_rate <= 0) {
1953  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1954  ret = AVERROR_INVALIDDATA;
1955  }
1956 
1957  check_decode_result(ist, got_output, ret);
1958 
1959  if (!*got_output || ret < 0)
1960  return ret;
1961 
1962  ist->samples_decoded += decoded_frame->nb_samples;
1963  ist->frames_decoded++;
1964 
1965 #if 1
1966  /* increment next_dts to use for the case where the input stream does not
1967  have timestamps or there are multiple frames in the packet */
1968  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1969  avctx->sample_rate;
1970  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1971  avctx->sample_rate;
1972 #endif
1973 
1974  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1975  ist->resample_channels != avctx->channels ||
1976  ist->resample_channel_layout != decoded_frame->channel_layout ||
1977  ist->resample_sample_rate != decoded_frame->sample_rate;
1978  if (resample_changed) {
1979  char layout1[64], layout2[64];
1980 
1981  if (!guess_input_channel_layout(ist)) {
1982  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1983  "layout for Input Stream #%d.%d\n", ist->file_index,
1984  ist->st->index);
1985  exit_program(1);
1986  }
1987  decoded_frame->channel_layout = avctx->channel_layout;
1988 
1989  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1990  ist->resample_channel_layout);
1991  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1992  decoded_frame->channel_layout);
1993 
1994  av_log(NULL, AV_LOG_VERBOSE,
1995  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1996  ist->file_index, ist->st->index,
1997  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1998  ist->resample_channels, layout1,
1999  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2000  avctx->channels, layout2);
2001 
2002  ist->resample_sample_fmt = decoded_frame->format;
2003  ist->resample_sample_rate = decoded_frame->sample_rate;
2004  ist->resample_channel_layout = decoded_frame->channel_layout;
2005  ist->resample_channels = avctx->channels;
2006 
2007  for (i = 0; i < nb_filtergraphs; i++)
2008  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2009  FilterGraph *fg = filtergraphs[i];
2010  if (configure_filtergraph(fg) < 0) {
2011  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2012  exit_program(1);
2013  }
2014  }
2015  }
2016 
2017  /* if the decoder provides a pts, use it instead of the last packet pts.
2018  the decoder could be delaying output by a packet or more. */
2019  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2020  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2021  decoded_frame_tb = avctx->time_base;
2022  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2023  decoded_frame->pts = decoded_frame->pkt_pts;
2024  decoded_frame_tb = ist->st->time_base;
2025  } else if (pkt->pts != AV_NOPTS_VALUE) {
2026  decoded_frame->pts = pkt->pts;
2027  decoded_frame_tb = ist->st->time_base;
2028  }else {
2029  decoded_frame->pts = ist->dts;
2030  decoded_frame_tb = AV_TIME_BASE_Q;
2031  }
2032  pkt->pts = AV_NOPTS_VALUE;
2033  if (decoded_frame->pts != AV_NOPTS_VALUE)
2034  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2035  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2036  (AVRational){1, avctx->sample_rate});
2037  ist->nb_samples = decoded_frame->nb_samples;
2038  for (i = 0; i < ist->nb_filters; i++) {
2039  if (i < ist->nb_filters - 1) {
2040  f = ist->filter_frame;
2041  err = av_frame_ref(f, decoded_frame);
2042  if (err < 0)
2043  break;
2044  } else
2045  f = decoded_frame;
2046  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2047  AV_BUFFERSRC_FLAG_PUSH);
2048  if (err == AVERROR_EOF)
2049  err = 0; /* ignore */
2050  if (err < 0)
2051  break;
2052  }
2053  decoded_frame->pts = AV_NOPTS_VALUE;
2054 
2055  av_frame_unref(ist->filter_frame);
2056  av_frame_unref(decoded_frame);
2057  return err < 0 ? err : ret;
2058 }
2059 
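/* Decode one video packet: run the decoder, retrieve the frame from the
 * hardware accelerator when one is in use, use the best-effort timestamp as
 * the frame pts, reconfigure the attached filtergraphs on a size or pixel
 * format change and push the frame into the connected buffer sources. */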
2060 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2061 {
2062  AVFrame *decoded_frame, *f;
2063  int i, ret = 0, err = 0, resample_changed;
2064  int64_t best_effort_timestamp;
2065  AVRational *frame_sample_aspect;
2066 
2067  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2068  return AVERROR(ENOMEM);
2069  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2070  return AVERROR(ENOMEM);
2071  decoded_frame = ist->decoded_frame;
2072  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2073 
2074  update_benchmark(NULL);
2075  ret = avcodec_decode_video2(ist->dec_ctx,
2076  decoded_frame, got_output, pkt);
2077  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2078 
2079  // The following line may be required in some cases where there is no parser
2080  // or the parser does not set has_b_frames correctly
2081  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2082  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2083  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2084  } else
2085  av_log(ist->dec_ctx, AV_LOG_WARNING,
2086  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2087  "If you want to help, upload a sample "
2088  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2089  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2090  ist->dec_ctx->has_b_frames,
2091  ist->st->codec->has_b_frames);
2092  }
2093 
2094  check_decode_result(ist, got_output, ret);
2095 
2096  if (*got_output && ret >= 0) {
2097  if (ist->dec_ctx->width != decoded_frame->width ||
2098  ist->dec_ctx->height != decoded_frame->height ||
2099  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2100  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2101  decoded_frame->width,
2102  decoded_frame->height,
2103  decoded_frame->format,
2104  ist->dec_ctx->width,
2105  ist->dec_ctx->height,
2106  ist->dec_ctx->pix_fmt);
2107  }
2108  }
2109 
2110  if (!*got_output || ret < 0)
2111  return ret;
2112 
2113  if(ist->top_field_first>=0)
2114  decoded_frame->top_field_first = ist->top_field_first;
2115 
2116  ist->frames_decoded++;
2117 
2118  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2119  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2120  if (err < 0)
2121  goto fail;
2122  }
2123  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2124 
2125  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2126  if(best_effort_timestamp != AV_NOPTS_VALUE)
2127  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2128 
2129  if (debug_ts) {
2130  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2131  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2132  ist->st->index, av_ts2str(decoded_frame->pts),
2133  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2134  best_effort_timestamp,
2135  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2136  decoded_frame->key_frame, decoded_frame->pict_type,
2137  ist->st->time_base.num, ist->st->time_base.den);
2138  }
2139 
2140  pkt->size = 0;
2141 
2142  if (ist->st->sample_aspect_ratio.num)
2143  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2144 
2145  resample_changed = ist->resample_width != decoded_frame->width ||
2146  ist->resample_height != decoded_frame->height ||
2147  ist->resample_pix_fmt != decoded_frame->format;
2148  if (resample_changed) {
2149  av_log(NULL, AV_LOG_INFO,
2150  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2151  ist->file_index, ist->st->index,
2152  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2153  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2154 
2155  ist->resample_width = decoded_frame->width;
2156  ist->resample_height = decoded_frame->height;
2157  ist->resample_pix_fmt = decoded_frame->format;
2158 
2159  for (i = 0; i < nb_filtergraphs; i++) {
2160  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2161  configure_filtergraph(filtergraphs[i]) < 0) {
2162  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2163  exit_program(1);
2164  }
2165  }
2166  }
2167 
2168  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2169  for (i = 0; i < ist->nb_filters; i++) {
2170  if (!frame_sample_aspect->num)
2171  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2172 
2173  if (i < ist->nb_filters - 1) {
2174  f = ist->filter_frame;
2175  err = av_frame_ref(f, decoded_frame);
2176  if (err < 0)
2177  break;
2178  } else
2179  f = decoded_frame;
2180  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2181  if (ret == AVERROR_EOF) {
2182  ret = 0; /* ignore */
2183  } else if (ret < 0) {
2184  av_log(NULL, AV_LOG_FATAL,
2185  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2186  exit_program(1);
2187  }
2188  }
2189 
2190 fail:
2191  av_frame_unref(ist->filter_frame);
2192  av_frame_unref(decoded_frame);
2193  return err < 0 ? err : ret;
2194 }
2195 
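/* Decode one subtitle packet.  With -fix_sub_duration the previously decoded
 * subtitle is held back and its display time is clipped to the start of the
 * current one before it is output, which is why the previous decode result is
 * swapped with the current one below. */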
2196 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2197 {
2198  AVSubtitle subtitle;
2199  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2200  &subtitle, got_output, pkt);
2201 
2202  check_decode_result(NULL, got_output, ret);
2203 
2204  if (ret < 0 || !*got_output) {
2205  if (!pkt->size)
2206  sub2video_flush(ist);
2207  return ret;
2208  }
2209 
2210  if (ist->fix_sub_duration) {
2211  int end = 1;
2212  if (ist->prev_sub.got_output) {
2213  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2214  1000, AV_TIME_BASE);
2215  if (end < ist->prev_sub.subtitle.end_display_time) {
2216  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2217  "Subtitle duration reduced from %d to %d%s\n",
2218  ist->prev_sub.subtitle.end_display_time, end,
2219  end <= 0 ? ", dropping it" : "");
2220  ist->prev_sub.subtitle.end_display_time = end;
2221  }
2222  }
2223  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2224  FFSWAP(int, ret, ist->prev_sub.ret);
2225  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2226  if (end <= 0)
2227  goto out;
2228  }
2229 
2230  if (!*got_output)
2231  return ret;
2232 
2233  sub2video_update(ist, &subtitle);
2234 
2235  if (!subtitle.num_rects)
2236  goto out;
2237 
2238  ist->frames_decoded++;
2239 
2240  for (i = 0; i < nb_output_streams; i++) {
2241  OutputStream *ost = output_streams[i];
2242 
2243  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2244  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2245  continue;
2246 
2247  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2248  }
2249 
2250 out:
2251  avsubtitle_free(&subtitle);
2252  return ret;
2253 }
2254 
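/* Signal EOF to every buffer source fed by this input stream by sending a
 * NULL frame, so the attached filtergraphs can drain whatever they have
 * buffered. */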
2255 static int send_filter_eof(InputStream *ist)
2256 {
2257  int i, ret;
2258  for (i = 0; i < ist->nb_filters; i++) {
2259  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2260  if (ret < 0)
2261  return ret;
2262  }
2263  return 0;
2264 }
2265 
2266 /* pkt = NULL means EOF (needed to flush decoder buffers) */
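/* The no_eof flag is used when an input file is looped: the decoders still
 * have to be flushed, but no EOF may be sent to the filters, otherwise they
 * could not be fed again on the next iteration.  The return value reports
 * whether the decoders produced any output. */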
2267 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2268 {
2269  int ret = 0, i;
2270  int got_output = 0;
2271 
2272  AVPacket avpkt;
2273  if (!ist->saw_first_ts) {
2274  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2275  ist->pts = 0;
2276  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2277  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2278  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2279  }
2280  ist->saw_first_ts = 1;
2281  }
2282 
2283  if (ist->next_dts == AV_NOPTS_VALUE)
2284  ist->next_dts = ist->dts;
2285  if (ist->next_pts == AV_NOPTS_VALUE)
2286  ist->next_pts = ist->pts;
2287 
2288  if (!pkt) {
2289  /* EOF handling */
2290  av_init_packet(&avpkt);
2291  avpkt.data = NULL;
2292  avpkt.size = 0;
2293  goto handle_eof;
2294  } else {
2295  avpkt = *pkt;
2296  }
2297 
2298  if (pkt->dts != AV_NOPTS_VALUE) {
2299  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2300  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2301  ist->next_pts = ist->pts = ist->dts;
2302  }
2303 
2304  // while we have more to decode or while the decoder did output something on EOF
2305  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2306  int duration;
2307  handle_eof:
2308 
2309  ist->pts = ist->next_pts;
2310  ist->dts = ist->next_dts;
2311 
2312  if (avpkt.size && avpkt.size != pkt->size &&
2313  !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES) && !ist->showed_multi_packet_warning) {
2314  av_log(NULL, AV_LOG_WARNING,
2315  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2316  ist->showed_multi_packet_warning = 1;
2317  }
2318 
2319  switch (ist->dec_ctx->codec_type) {
2320  case AVMEDIA_TYPE_AUDIO:
2321  ret = decode_audio (ist, &avpkt, &got_output);
2322  break;
2323  case AVMEDIA_TYPE_VIDEO:
2324  ret = decode_video (ist, &avpkt, &got_output);
2325  if (avpkt.duration) {
2326  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2327  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2328  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2329  duration = ((int64_t)AV_TIME_BASE *
2330  ist->dec_ctx->framerate.den * ticks) /
2331  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2332  } else
2333  duration = 0;
2334 
2335  if(ist->dts != AV_NOPTS_VALUE && duration) {
2336  ist->next_dts += duration;
2337  }else
2338  ist->next_dts = AV_NOPTS_VALUE;
2339 
2340  if (got_output)
2341  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2342  break;
2343  case AVMEDIA_TYPE_SUBTITLE:
2344  ret = transcode_subtitles(ist, &avpkt, &got_output);
2345  break;
2346  default:
2347  return -1;
2348  }
2349 
2350  if (ret < 0) {
2351  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2352  ist->file_index, ist->st->index, av_err2str(ret));
2353  if (exit_on_error)
2354  exit_program(1);
2355  break;
2356  }
2357 
2358  avpkt.dts=
2359  avpkt.pts= AV_NOPTS_VALUE;
2360 
2361  // touch data and size only if not EOF
2362  if (pkt) {
2363  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2364  ret = avpkt.size;
2365  avpkt.data += ret;
2366  avpkt.size -= ret;
2367  }
2368  if (!got_output) {
2369  continue;
2370  }
2371  if (got_output && !pkt)
2372  break;
2373  }
2374 
2375  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2376  /* except when looping we need to flush but not to send an EOF */
2377  if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2378  int ret = send_filter_eof(ist);
2379  if (ret < 0) {
2380  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2381  exit_program(1);
2382  }
2383  }
2384 
2385  /* handle stream copy */
2386  if (!ist->decoding_needed) {
2387  ist->dts = ist->next_dts;
2388  switch (ist->dec_ctx->codec_type) {
2389  case AVMEDIA_TYPE_AUDIO:
2390  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2391  ist->dec_ctx->sample_rate;
2392  break;
2393  case AVMEDIA_TYPE_VIDEO:
2394  if (ist->framerate.num) {
2395  // TODO: Remove work-around for c99-to-c89 issue 7
2396  AVRational time_base_q = AV_TIME_BASE_Q;
2397  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2398  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2399  } else if (pkt->duration) {
2400  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2401  } else if(ist->dec_ctx->framerate.num != 0) {
2402  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2403  ist->next_dts += ((int64_t)AV_TIME_BASE *
2404  ist->dec_ctx->framerate.den * ticks) /
2405  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2406  }
2407  break;
2408  }
2409  ist->pts = ist->dts;
2410  ist->next_pts = ist->next_dts;
2411  }
2412  for (i = 0; pkt && i < nb_output_streams; i++) {
2413  OutputStream *ost = output_streams[i];
2414 
2415  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2416  continue;
2417 
2418  do_streamcopy(ist, ost, pkt);
2419  }
2420 
2421  return got_output;
2422 }
2423 
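/* Create an SDP description covering all RTP output files and print it to
 * stdout, or write it to the file given with -sdp_file. */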
2424 static void print_sdp(void)
2425 {
2426  char sdp[16384];
2427  int i;
2428  int j;
2429  AVIOContext *sdp_pb;
2430  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2431 
2432  if (!avc)
2433  exit_program(1);
2434  for (i = 0, j = 0; i < nb_output_files; i++) {
2435  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2436  avc[j] = output_files[i]->ctx;
2437  j++;
2438  }
2439  }
2440 
2441  if (!j)
2442  goto fail;
2443 
2444  av_sdp_create(avc, j, sdp, sizeof(sdp));
2445 
2446  if (!sdp_filename) {
2447  printf("SDP:\n%s\n", sdp);
2448  fflush(stdout);
2449  } else {
2450  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2451  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2452  } else {
2453  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2454  avio_closep(&sdp_pb);
2455  av_freep(&sdp_filename);
2456  }
2457  }
2458 
2459 fail:
2460  av_freep(&avc);
2461 }
2462 
2463 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2464 {
2465  int i;
2466  for (i = 0; hwaccels[i].name; i++)
2467  if (hwaccels[i].pix_fmt == pix_fmt)
2468  return &hwaccels[i];
2469  return NULL;
2470 }
2471 
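/* AVCodecContext.get_format callback: walk the pixel formats proposed by the
 * decoder and pick the first hardware format whose hwaccel can actually be
 * initialized (honouring an explicit -hwaccel choice); otherwise stop at the
 * first software format and let the decoder use that. */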
2472 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2473 {
2474  InputStream *ist = s->opaque;
2475  const enum AVPixelFormat *p;
2476  int ret;
2477 
2478  for (p = pix_fmts; *p != -1; p++) {
2479  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2480  const HWAccel *hwaccel;
2481 
2482  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2483  break;
2484 
2485  hwaccel = get_hwaccel(*p);
2486  if (!hwaccel ||
2487  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2488  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2489  continue;
2490 
2491  ret = hwaccel->init(s);
2492  if (ret < 0) {
2493  if (ist->hwaccel_id == hwaccel->id) {
2494  av_log(NULL, AV_LOG_FATAL,
2495  "%s hwaccel requested for input stream #%d:%d, "
2496  "but cannot be initialized.\n", hwaccel->name,
2497  ist->file_index, ist->st->index);
2498  return AV_PIX_FMT_NONE;
2499  }
2500  continue;
2501  }
2502  ist->active_hwaccel_id = hwaccel->id;
2503  ist->hwaccel_pix_fmt = *p;
2504  break;
2505  }
2506 
2507  return *p;
2508 }
2509 
2510 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2511 {
2512  InputStream *ist = s->opaque;
2513 
2514  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2515  return ist->hwaccel_get_buffer(s, frame, flags);
2516 
2517  return avcodec_default_get_buffer2(s, frame, flags);
2518 }
2519 
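/* Open the decoder for one input stream: install the hwaccel-aware get_format
 * and get_buffer2 callbacks, request reference-counted frames and default the
 * "threads" option to "auto" unless the user set it explicitly. */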
2520 static int init_input_stream(int ist_index, char *error, int error_len)
2521 {
2522  int ret;
2523  InputStream *ist = input_streams[ist_index];
2524 
2525  if (ist->decoding_needed) {
2526  AVCodec *codec = ist->dec;
2527  if (!codec) {
2528  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2529  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2530  return AVERROR(EINVAL);
2531  }
2532 
2533  ist->dec_ctx->opaque = ist;
2534  ist->dec_ctx->get_format = get_format;
2535  ist->dec_ctx->get_buffer2 = get_buffer;
2536  ist->dec_ctx->thread_safe_callbacks = 1;
2537 
2538  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2539  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2540  (ist->decoding_needed & DECODING_FOR_OST)) {
2541  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2542  if (ist->decoding_needed & DECODING_FOR_FILTER)
2543  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2544  }
2545 
2546  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2547  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2548  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2549  if (ret == AVERROR_EXPERIMENTAL)
2550  abort_codec_experimental(codec, 0);
2551 
2552  snprintf(error, error_len,
2553  "Error while opening decoder for input stream "
2554  "#%d:%d : %s",
2555  ist->file_index, ist->st->index, av_err2str(ret));
2556  return ret;
2557  }
2558  assert_avoptions(ist->decoder_opts);
2559  }
2560 
2561  ist->next_pts = AV_NOPTS_VALUE;
2562  ist->next_dts = AV_NOPTS_VALUE;
2563 
2564  return 0;
2565 }
2566 
2567 static InputStream *get_input_stream(OutputStream *ost)
2568 {
2569  if (ost->source_index >= 0)
2570  return input_streams[ost->source_index];
2571  return NULL;
2572 }
2573 
2574 static int compare_int64(const void *a, const void *b)
2575 {
2576  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2577 }
2578 
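/* Open the encoder for one output stream, or just apply the user options in
 * the stream-copy case.  Subtitle encoders inherit the decoder's ASS header,
 * audio encoders without an explicit bitrate default to 128 kb/s, and coded
 * side data produced by the encoder is copied onto the output stream. */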
2579 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2580 {
2581  int ret = 0;
2582 
2583  if (ost->encoding_needed) {
2584  AVCodec *codec = ost->enc;
2585  AVCodecContext *dec = NULL;
2586  InputStream *ist;
2587 
2588  if ((ist = get_input_stream(ost)))
2589  dec = ist->dec_ctx;
2590  if (dec && dec->subtitle_header) {
2591  /* ASS code assumes this buffer is null terminated so add extra byte. */
2592  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2593  if (!ost->enc_ctx->subtitle_header)
2594  return AVERROR(ENOMEM);
2595  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2596  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2597  }
2598  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2599  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2600  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2601  !codec->defaults &&
2602  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2603  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2604  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2605 
2606  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2607  if (ret == AVERROR_EXPERIMENTAL)
2608  abort_codec_experimental(codec, 1);
2609  snprintf(error, error_len,
2610  "Error while opening encoder for output stream #%d:%d - "
2611  "maybe incorrect parameters such as bit_rate, rate, width or height",
2612  ost->file_index, ost->index);
2613  return ret;
2614  }
2615  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2616  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2617  av_buffersink_set_frame_size(ost->filter->filter,
2618  ost->enc_ctx->frame_size);
2619  assert_avoptions(ost->encoder_opts);
2620  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2621  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2622  " It takes bits/s as argument, not kbits/s\n");
2623 
2624  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2625  if (ret < 0) {
2626  av_log(NULL, AV_LOG_FATAL,
2627  "Error initializing the output stream codec context.\n");
2628  exit_program(1);
2629  }
2630 
2631  if (ost->enc_ctx->nb_coded_side_data) {
2632  int i;
2633 
2634  ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2635  sizeof(*ost->st->side_data));
2636  if (!ost->st->side_data)
2637  return AVERROR(ENOMEM);
2638 
2639  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2640  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2641  AVPacketSideData *sd_dst = &ost->st->side_data[i];
2642 
2643  sd_dst->data = av_malloc(sd_src->size);
2644  if (!sd_dst->data)
2645  return AVERROR(ENOMEM);
2646  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2647  sd_dst->size = sd_src->size;
2648  sd_dst->type = sd_src->type;
2649  ost->st->nb_side_data++;
2650  }
2651  }
2652 
2653  // copy timebase while removing common factors
2654  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2655  ost->st->codec->codec= ost->enc_ctx->codec;
2656  } else {
2657  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2658  if (ret < 0) {
2659  av_log(NULL, AV_LOG_FATAL,
2660  "Error setting up codec context options.\n");
2661  return ret;
2662  }
2663  // copy timebase while removing common factors
2664  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2665  }
2666 
2667  return ret;
2668 }
2669 
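/* Turn the -force_key_frames argument into a sorted array of timestamps in
 * the encoder time base.  Comma-separated times are parsed directly, and the
 * "chapters[+offset]" form expands to one keyframe per chapter of the output
 * file.  Illustrative values only:
 *
 *     -force_key_frames 0,10,20         three explicit keyframes
 *     -force_key_frames chapters-0.1    0.1 s before every chapter mark
 */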
2670 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2671  AVCodecContext *avctx)
2672 {
2673  char *p;
2674  int n = 1, i, size, index = 0;
2675  int64_t t, *pts;
2676 
2677  for (p = kf; *p; p++)
2678  if (*p == ',')
2679  n++;
2680  size = n;
2681  pts = av_malloc_array(size, sizeof(*pts));
2682  if (!pts) {
2683  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2684  exit_program(1);
2685  }
2686 
2687  p = kf;
2688  for (i = 0; i < n; i++) {
2689  char *next = strchr(p, ',');
2690 
2691  if (next)
2692  *next++ = 0;
2693 
2694  if (!memcmp(p, "chapters", 8)) {
2695 
2696  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2697  int j;
2698 
2699  if (avf->nb_chapters > INT_MAX - size ||
2700  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2701  sizeof(*pts)))) {
2702  av_log(NULL, AV_LOG_FATAL,
2703  "Could not allocate forced key frames array.\n");
2704  exit_program(1);
2705  }
2706  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2707  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2708 
2709  for (j = 0; j < avf->nb_chapters; j++) {
2710  AVChapter *c = avf->chapters[j];
2711  av_assert1(index < size);
2712  pts[index++] = av_rescale_q(c->start, c->time_base,
2713  avctx->time_base) + t;
2714  }
2715 
2716  } else {
2717 
2718  t = parse_time_or_die("force_key_frames", p, 1);
2719  av_assert1(index < size);
2720  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2721 
2722  }
2723 
2724  p = next;
2725  }
2726 
2727  av_assert0(index == size);
2728  qsort(pts, size, sizeof(*pts), compare_int64);
2729  ost->forced_kf_count = size;
2730  ost->forced_kf_pts = pts;
2731 }
2732 
2733 static void report_new_stream(int input_index, AVPacket *pkt)
2734 {
2735  InputFile *file = input_files[input_index];
2736  AVStream *st = file->ctx->streams[pkt->stream_index];
2737 
2738  if (pkt->stream_index < file->nb_streams_warn)
2739  return;
2740  av_log(file->ctx, AV_LOG_WARNING,
2741  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2742  av_get_media_type_string(st->codec->codec_type),
2743  input_index, pkt->stream_index,
2744  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2745  file->nb_streams_warn = pkt->stream_index + 1;
2746 }
2747 
2748 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2749 {
2750  AVDictionaryEntry *e;
2751 
2752  uint8_t *encoder_string;
2753  int encoder_string_len;
2754  int format_flags = 0;
2755  int codec_flags = 0;
2756 
2757  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2758  return;
2759 
2760  e = av_dict_get(of->opts, "fflags", NULL, 0);
2761  if (e) {
2762  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2763  if (!o)
2764  return;
2765  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2766  }
2767  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2768  if (e) {
2769  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2770  if (!o)
2771  return;
2772  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2773  }
2774 
2775  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2776  encoder_string = av_mallocz(encoder_string_len);
2777  if (!encoder_string)
2778  exit_program(1);
2779 
2780  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2781  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2782  else
2783  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2784  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2785  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2786  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2787 }
2788 
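/* Set up the whole transcoding pipeline: bind complex filtergraph outputs to
 * output streams, derive encoder parameters (copying them from the decoder in
 * the stream-copy case), open all encoders and decoders, discard unused
 * programs and finally open the output files and write their headers. */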
2789 static int transcode_init(void)
2790 {
2791  int ret = 0, i, j, k;
2792  AVFormatContext *oc;
2793  OutputStream *ost;
2794  InputStream *ist;
2795  char error[1024] = {0};
2796  int want_sdp = 1;
2797 
2798  for (i = 0; i < nb_filtergraphs; i++) {
2799  FilterGraph *fg = filtergraphs[i];
2800  for (j = 0; j < fg->nb_outputs; j++) {
2801  OutputFilter *ofilter = fg->outputs[j];
2802  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2803  continue;
2804  if (fg->nb_inputs != 1)
2805  continue;
2806  for (k = nb_input_streams-1; k >= 0 ; k--)
2807  if (fg->inputs[0]->ist == input_streams[k])
2808  break;
2809  ofilter->ost->source_index = k;
2810  }
2811  }
2812 
2813  /* init framerate emulation */
2814  for (i = 0; i < nb_input_files; i++) {
2815  InputFile *ifile = input_files[i];
2816  if (ifile->rate_emu)
2817  for (j = 0; j < ifile->nb_streams; j++)
2818  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2819  }
2820 
2821  /* for each output stream, we compute the right encoding parameters */
2822  for (i = 0; i < nb_output_streams; i++) {
2823  AVCodecContext *enc_ctx;
2824  AVCodecContext *dec_ctx = NULL;
2825  ost = output_streams[i];
2826  oc = output_files[ost->file_index]->ctx;
2827  ist = get_input_stream(ost);
2828 
2829  if (ost->attachment_filename)
2830  continue;
2831 
2832  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2833 
2834  if (ist) {
2835  dec_ctx = ist->dec_ctx;
2836 
2837  ost->st->disposition = ist->st->disposition;
2838  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2839  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2840  } else {
2841  for (j=0; j<oc->nb_streams; j++) {
2842  AVStream *st = oc->streams[j];
2843  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2844  break;
2845  }
2846  if (j == oc->nb_streams)
2847  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2848  ost->st->disposition = AV_DISPOSITION_DEFAULT;
2849  }
2850 
2851  if (ost->stream_copy) {
2852  AVRational sar;
2853  uint64_t extra_size;
2854 
2855  av_assert0(ist && !ost->filter);
2856 
2857  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2858 
2859  if (extra_size > INT_MAX) {
2860  return AVERROR(EINVAL);
2861  }
2862 
2863  /* if stream_copy is selected, no need to decode or encode */
2864  enc_ctx->codec_id = dec_ctx->codec_id;
2865  enc_ctx->codec_type = dec_ctx->codec_type;
2866 
2867  if (!enc_ctx->codec_tag) {
2868  unsigned int codec_tag;
2869  if (!oc->oformat->codec_tag ||
2870  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2871  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2872  enc_ctx->codec_tag = dec_ctx->codec_tag;
2873  }
2874 
2875  enc_ctx->bit_rate = dec_ctx->bit_rate;
2876  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2877  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2878  enc_ctx->field_order = dec_ctx->field_order;
2879  if (dec_ctx->extradata_size) {
2880  enc_ctx->extradata = av_mallocz(extra_size);
2881  if (!enc_ctx->extradata) {
2882  return AVERROR(ENOMEM);
2883  }
2884  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2885  }
2886  enc_ctx->extradata_size= dec_ctx->extradata_size;
2887  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2888 
2889  enc_ctx->time_base = ist->st->time_base;
2890  /*
2891  * Avi is a special case here because it supports variable fps but
2892  * having the fps and timebase differ significantly adds quite some
2893  * overhead
2894  */
2895  if(!strcmp(oc->oformat->name, "avi")) {
2896  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2897  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2898  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2899  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2900  || copy_tb==2){
2901  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2902  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2903  enc_ctx->ticks_per_frame = 2;
2904  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2905  && av_q2d(ist->st->time_base) < 1.0/500
2906  || copy_tb==0){
2907  enc_ctx->time_base = dec_ctx->time_base;
2908  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2909  enc_ctx->time_base.den *= 2;
2910  enc_ctx->ticks_per_frame = 2;
2911  }
2912  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2913  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2914  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2915  && strcmp(oc->oformat->name, "f4v")
2916  ) {
2917  if( copy_tb<0 && dec_ctx->time_base.den
2918  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2919  && av_q2d(ist->st->time_base) < 1.0/500
2920  || copy_tb==0){
2921  enc_ctx->time_base = dec_ctx->time_base;
2922  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2923  }
2924  }
2925  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2926  && dec_ctx->time_base.num < dec_ctx->time_base.den
2927  && dec_ctx->time_base.num > 0
2928  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2929  enc_ctx->time_base = dec_ctx->time_base;
2930  }
2931 
2932  if (!ost->frame_rate.num)
2933  ost->frame_rate = ist->framerate;
2934  if(ost->frame_rate.num)
2935  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2936 
2937  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2938  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2939 
2940  if (ist->st->nb_side_data) {
2941  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2942  sizeof(*ist->st->side_data));
2943  if (!ost->st->side_data)
2944  return AVERROR(ENOMEM);
2945 
2946  ost->st->nb_side_data = 0;
2947  for (j = 0; j < ist->st->nb_side_data; j++) {
2948  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2949  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2950 
2951  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2952  continue;
2953 
2954  sd_dst->data = av_malloc(sd_src->size);
2955  if (!sd_dst->data)
2956  return AVERROR(ENOMEM);
2957  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2958  sd_dst->size = sd_src->size;
2959  sd_dst->type = sd_src->type;
2960  ost->st->nb_side_data++;
2961  }
2962  }
2963 
2964  ost->parser = av_parser_init(enc_ctx->codec_id);
2965 
2966  switch (enc_ctx->codec_type) {
2967  case AVMEDIA_TYPE_AUDIO:
2968  if (audio_volume != 256) {
2969  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2970  exit_program(1);
2971  }
2972  enc_ctx->channel_layout = dec_ctx->channel_layout;
2973  enc_ctx->sample_rate = dec_ctx->sample_rate;
2974  enc_ctx->channels = dec_ctx->channels;
2975  enc_ctx->frame_size = dec_ctx->frame_size;
2976  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2977  enc_ctx->block_align = dec_ctx->block_align;
2978  enc_ctx->initial_padding = dec_ctx->delay;
2979  enc_ctx->profile = dec_ctx->profile;
2980 #if FF_API_AUDIOENC_DELAY
2981  enc_ctx->delay = dec_ctx->delay;
2982 #endif
2983  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2984  enc_ctx->block_align= 0;
2985  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2986  enc_ctx->block_align= 0;
2987  break;
2988  case AVMEDIA_TYPE_VIDEO:
2989  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2990  enc_ctx->width = dec_ctx->width;
2991  enc_ctx->height = dec_ctx->height;
2992  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2993  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2994  sar =
2995  av_mul_q(ost->frame_aspect_ratio,
2996  (AVRational){ enc_ctx->height, enc_ctx->width });
2997  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2998  "with stream copy may produce invalid files\n");
2999  }
3000  else if (ist->st->sample_aspect_ratio.num)
3001  sar = ist->st->sample_aspect_ratio;
3002  else
3003  sar = dec_ctx->sample_aspect_ratio;
3004  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3005  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3006  ost->st->r_frame_rate = ist->st->r_frame_rate;
3007  break;
3008  case AVMEDIA_TYPE_SUBTITLE:
3009  enc_ctx->width = dec_ctx->width;
3010  enc_ctx->height = dec_ctx->height;
3011  break;
3012  case AVMEDIA_TYPE_UNKNOWN:
3013  case AVMEDIA_TYPE_DATA:
3014  case AVMEDIA_TYPE_ATTACHMENT:
3015  break;
3016  default:
3017  abort();
3018  }
3019  } else {
3020  if (!ost->enc)
3021  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3022  if (!ost->enc) {
3023  /* should only happen when a default codec is not present. */
3024  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3025  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3026  ret = AVERROR(EINVAL);
3027  goto dump_format;
3028  }
3029 
3030  set_encoder_id(output_files[ost->file_index], ost);
3031 
3032 #if CONFIG_LIBMFX
3033  if (qsv_transcode_init(ost))
3034  exit_program(1);
3035 #endif
3036 
3037  if (!ost->filter &&
3038  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3039  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3040  FilterGraph *fg;
3041  fg = init_simple_filtergraph(ist, ost);
3042  if (configure_filtergraph(fg)) {
3043  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3044  exit_program(1);
3045  }
3046  }
3047 
3048  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3049  if (!ost->frame_rate.num)
3050  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3051  if (ist && !ost->frame_rate.num)
3052  ost->frame_rate = ist->framerate;
3053  if (ist && !ost->frame_rate.num)
3054  ost->frame_rate = ist->st->r_frame_rate;
3055  if (ist && !ost->frame_rate.num) {
3056  ost->frame_rate = (AVRational){25, 1};
3057  av_log(NULL, AV_LOG_WARNING,
3058  "No information "
3059  "about the input framerate is available. Falling "
3060  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3061  "if you want a different framerate.\n",
3062  ost->file_index, ost->index);
3063  }
3064 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3065  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3066  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3067  ost->frame_rate = ost->enc->supported_framerates[idx];
3068  }
3069  // reduce frame rate for mpeg4 to be within the spec limits
3070  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3071  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3072  ost->frame_rate.num, ost->frame_rate.den, 65535);
3073  }
3074  }
3075 
3076  switch (enc_ctx->codec_type) {
3077  case AVMEDIA_TYPE_AUDIO:
3078  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3079  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3080  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3081  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3082  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3083  break;
3084  case AVMEDIA_TYPE_VIDEO:
3085  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3086  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3087  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3088  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3089  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3090  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3091  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3092  }
3093  for (j = 0; j < ost->forced_kf_count; j++)
3094  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3095  AV_TIME_BASE_Q,
3096  enc_ctx->time_base);
3097 
3098  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3099  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3100  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3101  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3102  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3103  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3104  if (!strncmp(ost->enc->name, "libx264", 7) &&
3105  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3106  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3107  av_log(NULL, AV_LOG_WARNING,
3108  "No pixel format specified, %s for H.264 encoding chosen.\n"
3109  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3110  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3111  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3112  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3113  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3114  av_log(NULL, AV_LOG_WARNING,
3115  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3116  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3117  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3118  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3119 
3120  ost->st->avg_frame_rate = ost->frame_rate;
3121 
3122  if (!dec_ctx ||
3123  enc_ctx->width != dec_ctx->width ||
3124  enc_ctx->height != dec_ctx->height ||
3125  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3126  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3127  }
3128 
3129  if (ost->forced_keyframes) {
3130  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3131  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3132  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3133  if (ret < 0) {
3134  av_log(NULL, AV_LOG_ERROR,
3135  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3136  return ret;
3137  }
3138  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3139  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3140  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3141  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3142 
3143  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3144  // parse it only for static kf timings
3145  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3146  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3147  }
3148  }
3149  break;
3150  case AVMEDIA_TYPE_SUBTITLE:
3151  enc_ctx->time_base = (AVRational){1, 1000};
3152  if (!enc_ctx->width) {
3153  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3154  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3155  }
3156  break;
3157  case AVMEDIA_TYPE_DATA:
3158  break;
3159  default:
3160  abort();
3161  break;
3162  }
3163  }
3164 
3165  if (ost->disposition) {
3166  static const AVOption opts[] = {
3167  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3168  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3169  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3170  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3171  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3172  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3173  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3174  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3175  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3176  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3177  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3178  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3179  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3180  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3181  { NULL },
3182  };
3183  static const AVClass class = {
3184  .class_name = "",
3185  .item_name = av_default_item_name,
3186  .option = opts,
3187  .version = LIBAVUTIL_VERSION_INT,
3188  };
3189  const AVClass *pclass = &class;
3190 
3191  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3192  if (ret < 0)
3193  goto dump_format;
3194  }
3195  }
3196 
3197  /* open each encoder */
3198  for (i = 0; i < nb_output_streams; i++) {
3199  ret = init_output_stream(output_streams[i], error, sizeof(error));
3200  if (ret < 0)
3201  goto dump_format;
3202  }
3203 
3204  /* init input streams */
3205  for (i = 0; i < nb_input_streams; i++)
3206  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3207  for (i = 0; i < nb_output_streams; i++) {
3208  ost = output_streams[i];
3209  avcodec_close(ost->enc_ctx);
3210  }
3211  goto dump_format;
3212  }
3213 
3214  /* discard unused programs */
3215  for (i = 0; i < nb_input_files; i++) {
3216  InputFile *ifile = input_files[i];
3217  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3218  AVProgram *p = ifile->ctx->programs[j];
3219  int discard = AVDISCARD_ALL;
3220 
3221  for (k = 0; k < p->nb_stream_indexes; k++)
3222  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3223  discard = AVDISCARD_DEFAULT;
3224  break;
3225  }
3226  p->discard = discard;
3227  }
3228  }
3229 
3230  /* open files and write file headers */
3231  for (i = 0; i < nb_output_files; i++) {
3232  oc = output_files[i]->ctx;
3233  oc->interrupt_callback = int_cb;
3234  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3235  snprintf(error, sizeof(error),
3236  "Could not write header for output file #%d "
3237  "(incorrect codec parameters ?): %s",
3238  i, av_err2str(ret));
3239  ret = AVERROR(EINVAL);
3240  goto dump_format;
3241  }
3242 // assert_avoptions(output_files[i]->opts);
3243  if (strcmp(oc->oformat->name, "rtp")) {
3244  want_sdp = 0;
3245  }
3246  }
3247 
3248  dump_format:
3249  /* dump the file output parameters - cannot be done before in case
3250  of stream copy */
3251  for (i = 0; i < nb_output_files; i++) {
3252  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3253  }
3254 
3255  /* dump the stream mapping */
3256  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3257  for (i = 0; i < nb_input_streams; i++) {
3258  ist = input_streams[i];
3259 
3260  for (j = 0; j < ist->nb_filters; j++) {
3261  if (ist->filters[j]->graph->graph_desc) {
3262  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3263  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3264  ist->filters[j]->name);
3265  if (nb_filtergraphs > 1)
3266  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3267  av_log(NULL, AV_LOG_INFO, "\n");
3268  }
3269  }
3270  }
3271 
3272  for (i = 0; i < nb_output_streams; i++) {
3273  ost = output_streams[i];
3274 
3275  if (ost->attachment_filename) {
3276  /* an attached file */
3277  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3278  ost->attachment_filename, ost->file_index, ost->index);
3279  continue;
3280  }
3281 
3282  if (ost->filter && ost->filter->graph->graph_desc) {
3283  /* output from a complex graph */
3284  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3285  if (nb_filtergraphs > 1)
3286  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3287 
3288  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3289  ost->index, ost->enc ? ost->enc->name : "?");
3290  continue;
3291  }
3292 
3293  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3294  input_streams[ost->source_index]->file_index,
3295  input_streams[ost->source_index]->st->index,
3296  ost->file_index,
3297  ost->index);
3298  if (ost->sync_ist != input_streams[ost->source_index])
3299  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3300  ost->sync_ist->file_index,
3301  ost->sync_ist->st->index);
3302  if (ost->stream_copy)
3303  av_log(NULL, AV_LOG_INFO, " (copy)");
3304  else {
3305  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3306  const AVCodec *out_codec = ost->enc;
3307  const char *decoder_name = "?";
3308  const char *in_codec_name = "?";
3309  const char *encoder_name = "?";
3310  const char *out_codec_name = "?";
3311  const AVCodecDescriptor *desc;
3312 
3313  if (in_codec) {
3314  decoder_name = in_codec->name;
3315  desc = avcodec_descriptor_get(in_codec->id);
3316  if (desc)
3317  in_codec_name = desc->name;
3318  if (!strcmp(decoder_name, in_codec_name))
3319  decoder_name = "native";
3320  }
3321 
3322  if (out_codec) {
3323  encoder_name = out_codec->name;
3324  desc = avcodec_descriptor_get(out_codec->id);
3325  if (desc)
3326  out_codec_name = desc->name;
3327  if (!strcmp(encoder_name, out_codec_name))
3328  encoder_name = "native";
3329  }
3330 
3331  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3332  in_codec_name, decoder_name,
3333  out_codec_name, encoder_name);
3334  }
3335  av_log(NULL, AV_LOG_INFO, "\n");
3336  }
3337 
3338  if (ret) {
3339  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3340  return ret;
3341  }
3342 
3343  if (sdp_filename || want_sdp) {
3344  print_sdp();
3345  }
3346 
3347  transcode_init_done = 1;
3348 
3349  return 0;
3350 }
3351 
3352 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3353 static int need_output(void)
3354 {
3355  int i;
3356 
3357  for (i = 0; i < nb_output_streams; i++) {
3358  OutputStream *ost = output_streams[i];
3359  OutputFile *of = output_files[ost->file_index];
3360  AVFormatContext *os = output_files[ost->file_index]->ctx;
3361 
3362  if (ost->finished ||
3363  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3364  continue;
3365  if (ost->frame_number >= ost->max_frames) {
3366  int j;
3367  for (j = 0; j < of->ctx->nb_streams; j++)
3368  close_output_stream(output_streams[of->ost_index + j]);
3369  continue;
3370  }
3371 
3372  return 1;
3373  }
3374 
3375  return 0;
3376 }
3377 
3378 /**
3379  * Select the output stream to process.
3380  *
3381  * @return selected output stream, or NULL if none available
3382  */
3383 static OutputStream *choose_output(void)
3384 {
3385  int i;
3386  int64_t opts_min = INT64_MAX;
3387  OutputStream *ost_min = NULL;
3388 
3389  for (i = 0; i < nb_output_streams; i++) {
3390  OutputStream *ost = output_streams[i];
3391  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3392  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3393  AV_TIME_BASE_Q);
3394  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3395  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3396 
3397  if (!ost->finished && opts < opts_min) {
3398  opts_min = opts;
3399  ost_min = ost->unavailable ? NULL : ost;
3400  }
3401  }
3402  return ost_min;
3403 }
3404 
3405 static void set_tty_echo(int on)
3406 {
3407 #if HAVE_TERMIOS_H
3408  struct termios tty;
3409  if (tcgetattr(0, &tty) == 0) {
3410  if (on) tty.c_lflag |= ECHO;
3411  else tty.c_lflag &= ~ECHO;
3412  tcsetattr(0, TCSANOW, &tty);
3413  }
3414 #endif
3415 }
3416 
3417 static int check_keyboard_interaction(int64_t cur_time)
3418 {
3419  int i, ret, key;
3420  static int64_t last_time;
3421  if (received_nb_signals)
3422  return AVERROR_EXIT;
3423  /* read_key() returns 0 on EOF */
3424  if(cur_time - last_time >= 100000 && !run_as_daemon){
3425  key = read_key();
3426  last_time = cur_time;
3427  }else
3428  key = -1;
3429  if (key == 'q')
3430  return AVERROR_EXIT;
3431  if (key == '+') av_log_set_level(av_log_get_level()+10);
3432  if (key == '-') av_log_set_level(av_log_get_level()-10);
3433  if (key == 's') qp_hist ^= 1;
3434  if (key == 'h'){
3435  if (do_hex_dump){
3436  do_hex_dump = do_pkt_dump = 0;
3437  } else if(do_pkt_dump){
3438  do_hex_dump = 1;
3439  } else
3440  do_pkt_dump = 1;
3441  av_log_set_level(AV_LOG_DEBUG);
3442  }
3443  if (key == 'c' || key == 'C'){
3444  char buf[4096], target[64], command[256], arg[256] = {0};
3445  double time;
3446  int k, n = 0;
3447  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3448  i = 0;
3449  set_tty_echo(1);
3450  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3451  if (k > 0)
3452  buf[i++] = k;
3453  buf[i] = 0;
3454  set_tty_echo(0);
3455  fprintf(stderr, "\n");
3456  if (k > 0 &&
3457  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3458  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3459  target, time, command, arg);
3460  for (i = 0; i < nb_filtergraphs; i++) {
3461  FilterGraph *fg = filtergraphs[i];
3462  if (fg->graph) {
3463  if (time < 0) {
3464  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3465  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3466  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3467  } else if (key == 'c') {
3468  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3469  ret = AVERROR_PATCHWELCOME;
3470  } else {
3471  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3472  if (ret < 0)
3473  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3474  }
3475  }
3476  }
3477  } else {
3478  av_log(NULL, AV_LOG_ERROR,
3479  "Parse error, at least 3 arguments were expected, "
3480  "only %d given in string '%s'\n", n, buf);
3481  }
3482  }
3483  if (key == 'd' || key == 'D'){
3484  int debug=0;
3485  if(key == 'D') {
3486  debug = input_streams[0]->st->codec->debug<<1;
3487  if(!debug) debug = 1;
3488  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3489  debug += debug;
3490  }else{
3491  char buf[32];
3492  int k = 0;
3493  i = 0;
3494  set_tty_echo(1);
3495  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3496  if (k > 0)
3497  buf[i++] = k;
3498  buf[i] = 0;
3499  set_tty_echo(0);
3500  fprintf(stderr, "\n");
3501  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3502  fprintf(stderr,"error parsing debug value\n");
3503  }
3504  for(i=0;i<nb_input_streams;i++) {
3505  input_streams[i]->st->codec->debug = debug;
3506  }
3507  for(i=0;i<nb_output_streams;i++) {
3508  OutputStream *ost = output_streams[i];
3509  ost->enc_ctx->debug = debug;
3510  }
3511  if(debug) av_log_set_level(AV_LOG_DEBUG);
3512  fprintf(stderr,"debug=%d\n", debug);
3513  }
3514  if (key == '?'){
3515  fprintf(stderr, "key function\n"
3516  "? show this help\n"
3517  "+ increase verbosity\n"
3518  "- decrease verbosity\n"
3519  "c Send command to first matching filter supporting it\n"
3520  "C Send/Queue command to all matching filters\n"
3521  "D cycle through available debug modes\n"
3522  "h dump packets/hex press to cycle through the 3 states\n"
3523  "q quit\n"
3524  "s Show QP histogram\n"
3525  );
3526  }
3527  return 0;
3528 }
3529 
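/* With pthreads available and more than one input file, every input file gets
 * its own demuxer thread that pushes packets into a bounded message queue;
 * the main thread then pulls packets via get_input_packet_mt().  For
 * non-seekable inputs the receive side is non-blocking so that one slow input
 * cannot stall the others. */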
3530 #if HAVE_PTHREADS
3531 static void *input_thread(void *arg)
3532 {
3533  InputFile *f = arg;
3534  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3535  int ret = 0;
3536 
3537  while (1) {
3538  AVPacket pkt;
3539  ret = av_read_frame(f->ctx, &pkt);
3540 
3541  if (ret == AVERROR(EAGAIN)) {
3542  av_usleep(10000);
3543  continue;
3544  }
3545  if (ret < 0) {
3546  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3547  break;
3548  }
3549  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3550  if (flags && ret == AVERROR(EAGAIN)) {
3551  flags = 0;
3552  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3553  av_log(f->ctx, AV_LOG_WARNING,
3554  "Thread message queue blocking; consider raising the "
3555  "thread_queue_size option (current value: %d)\n",
3556  f->thread_queue_size);
3557  }
3558  if (ret < 0) {
3559  if (ret != AVERROR_EOF)
3560  av_log(f->ctx, AV_LOG_ERROR,
3561  "Unable to send packet to main thread: %s\n",
3562  av_err2str(ret));
3563  av_packet_unref(&pkt);
3564  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3565  break;
3566  }
3567  }
3568 
3569  return NULL;
3570 }
3571 
3572 static void free_input_threads(void)
3573 {
3574  int i;
3575 
3576  for (i = 0; i < nb_input_files; i++) {
3577  InputFile *f = input_files[i];
3578  AVPacket pkt;
3579 
3580  if (!f || !f->in_thread_queue)
3581  continue;
3582  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3583  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3584  av_packet_unref(&pkt);
3585 
3586  pthread_join(f->thread, NULL);
3587  f->joined = 1;
3588  av_thread_message_queue_free(&f->in_thread_queue);
3589  }
3590 }
3591 
3592 static int init_input_threads(void)
3593 {
3594  int i, ret;
3595 
3596  if (nb_input_files == 1)
3597  return 0;
3598 
3599  for (i = 0; i < nb_input_files; i++) {
3600  InputFile *f = input_files[i];
3601 
3602  if (f->ctx->pb ? !f->ctx->pb->seekable :
3603  strcmp(f->ctx->iformat->name, "lavfi"))
3604  f->non_blocking = 1;
3605  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3606  f->thread_queue_size, sizeof(AVPacket));
3607  if (ret < 0)
3608  return ret;
3609 
3610  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3611  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3612  av_thread_message_queue_free(&f->in_thread_queue);
3613  return AVERROR(ret);
3614  }
3615  }
3616  return 0;
3617 }
3618 
3619 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3620 {
3621  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3622  f->non_blocking ?
3623  AV_THREAD_MESSAGE_NONBLOCK : 0);
3624 }
3625 #endif
3626 
3627 static int get_input_packet(InputFile *f, AVPacket *pkt)
3628 {
3629  if (f->rate_emu) {
3630  int i;
3631  for (i = 0; i < f->nb_streams; i++) {
3632  InputStream *ist = input_streams[f->ist_index + i];
3633  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3634  int64_t now = av_gettime_relative() - ist->start;
3635  if (pts > now)
3636  return AVERROR(EAGAIN);
3637  }
3638  }
3639 
3640 #if HAVE_PTHREADS
3641  if (nb_input_files > 1)
3642  return get_input_packet_mt(f, pkt);
3643 #endif
3644  return av_read_frame(f->ctx, pkt);
3645 }
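
Editorial note: a worked illustration of the rate-emulation branch in get_input_packet() above (standalone sketch; the function and parameter names are hypothetical). With -re, a packet is withheld until the wall clock has caught up with its decoded timestamp, both expressed in microseconds.

#include <errno.h>
#include "libavutil/avutil.h"
#include "libavutil/error.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"

static int ready_to_emit(int64_t dts_in_avtb, int64_t stream_start_rt)
{
    /* AV_TIME_BASE is 1000000, so this rescale keeps the value in microseconds */
    int64_t pts = av_rescale(dts_in_avtb, 1000000, AV_TIME_BASE);
    int64_t now = av_gettime_relative() - stream_start_rt;  /* elapsed wall clock, microseconds */
    return pts > now ? AVERROR(EAGAIN) : 0;  /* e.g. dts at 3.0 s but only 2.5 s elapsed -> EAGAIN */
}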
3646 
3647 static int got_eagain(void)
3648 {
3649  int i;
3650  for (i = 0; i < nb_output_streams; i++)
3651  if (output_streams[i]->unavailable)
3652  return 1;
3653  return 0;
3654 }
3655 
3656 static void reset_eagain(void)
3657 {
3658  int i;
3659  for (i = 0; i < nb_input_files; i++)
3660  input_files[i]->eagain = 0;
3661  for (i = 0; i < nb_output_streams; i++)
3662  output_streams[i]->unavailable = 0;
3663 }
3664 
3665 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3666 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3667  AVRational time_base)
3668 {
3669  int ret;
3670 
3671  if (!*duration) {
3672  *duration = tmp;
3673  return tmp_time_base;
3674  }
3675 
3676  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3677  if (ret < 0) {
3678  *duration = tmp;
3679  return tmp_time_base;
3680  }
3681 
3682  return time_base;
3683 }
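
Editorial note: duration_max() compares two durations living in different time bases without converting either one; av_compare_ts() performs the cross-base comparison. A small worked example with invented values (standalone sketch, not part of ffmpeg.c):

#include "libavutil/mathematics.h"
#include "libavutil/rational.h"

static void duration_max_example(void)
{
    int64_t    duration = 3000000;          /* 3 s stored in 1/1000000 */
    AVRational tb       = {1, 1000000};
    int64_t    tmp      = 100;              /* 4 s expressed in 1/25 */
    AVRational tmp_tb   = {1, 25};

    /* av_compare_ts() returns <0 when the stored duration is the shorter one */
    if (av_compare_ts(duration, tb, tmp, tmp_tb) < 0) {
        duration = tmp;                     /* keep the larger duration ... */
        tb       = tmp_tb;                  /* ... together with its own time base */
    }
}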
3684 
3685 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3686 {
3687  InputStream *ist;
3688  AVCodecContext *avctx;
3689  int i, ret, has_audio = 0;
3690  int64_t duration = 0;
3691 
3692  ret = av_seek_frame(is, -1, is->start_time, 0);
3693  if (ret < 0)
3694  return ret;
3695 
3696  for (i = 0; i < ifile->nb_streams; i++) {
3697  ist = input_streams[ifile->ist_index + i];
3698  avctx = ist->dec_ctx;
3699 
3700  // flush decoders
3701  if (ist->decoding_needed) {
3702  process_input_packet(ist, NULL, 1);
3703  avcodec_flush_buffers(avctx);
3704  }
3705 
3706  /* duration is the length of the last frame in a stream;
3707  * when an audio stream is present we don't care about the
3708  * last video frame's length because it is not defined exactly */
3709  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3710  has_audio = 1;
3711  }
3712 
3713  for (i = 0; i < ifile->nb_streams; i++) {
3714  ist = input_streams[ifile->ist_index + i];
3715  avctx = ist->dec_ctx;
3716 
3717  if (has_audio) {
3718  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3719  AVRational sample_rate = {1, avctx->sample_rate};
3720 
3721  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3722  } else
3723  continue;
3724  } else {
3725  if (ist->framerate.num) {
3726  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3727  } else if (ist->st->avg_frame_rate.num) {
3728  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3729  } else duration = 1;
3730  }
3731  if (!ifile->duration)
3732  ifile->time_base = ist->st->time_base;
3733  /* the total duration of the stream, max_pts - min_pts is
3734  * the duration of the stream without the last frame */
3735  duration += ist->max_pts - ist->min_pts;
3736  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3737  ifile->time_base);
3738  }
3739 
3740  if (ifile->loop > 0)
3741  ifile->loop--;
3742 
3743  return ret;
3744 }
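
Editorial note: the per-stream duration that seek_to_start() accumulates for looping is max_pts - min_pts plus an estimate of the last frame's length. For audio that estimate is nb_samples rescaled from a 1/sample_rate time base into the stream time base; a worked example with invented numbers (standalone sketch):

#include "libavutil/mathematics.h"
#include "libavutil/rational.h"

static int64_t last_audio_frame_duration_example(void)
{
    AVRational sample_rate = {1, 48000};   /* one sample lasts 1/48000 s */
    AVRational stream_tb   = {1, 90000};   /* MPEG-TS style 90 kHz time base */
    /* 1024 trailing samples: 1024/48000 s == 1920/90000 s */
    return av_rescale_q(1024, sample_rate, stream_tb);   /* 1920 */
}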
3745 
3746 /*
3747  * Return
3748  * - 0 -- one packet was read and processed
3749  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3750  * this function should be called again
3751  * - AVERROR_EOF -- this function should not be called again
3752  */
3753 static int process_input(int file_index)
3754 {
3755  InputFile *ifile = input_files[file_index];
3756  AVFormatContext *is;
3757  InputStream *ist;
3758  AVPacket pkt;
3759  int ret, i, j;
3760  int64_t duration;
3761  int64_t pkt_dts;
3762 
3763  is = ifile->ctx;
3764  ret = get_input_packet(ifile, &pkt);
3765 
3766  if (ret == AVERROR(EAGAIN)) {
3767  ifile->eagain = 1;
3768  return ret;
3769  }
3770  if (ret < 0 && ifile->loop) {
3771  if ((ret = seek_to_start(ifile, is)) < 0)
3772  return ret;
3773  ret = get_input_packet(ifile, &pkt);
3774  }
3775  if (ret < 0) {
3776  if (ret != AVERROR_EOF) {
3777  print_error(is->filename, ret);
3778  if (exit_on_error)
3779  exit_program(1);
3780  }
3781 
3782  for (i = 0; i < ifile->nb_streams; i++) {
3783  ist = input_streams[ifile->ist_index + i];
3784  if (ist->decoding_needed) {
3785  ret = process_input_packet(ist, NULL, 0);
3786  if (ret>0)
3787  return 0;
3788  }
3789 
3790  /* mark all outputs that don't go through lavfi as finished */
3791  for (j = 0; j < nb_output_streams; j++) {
3792  OutputStream *ost = output_streams[j];
3793 
3794  if (ost->source_index == ifile->ist_index + i &&
3795  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3796  finish_output_stream(ost);
3797  }
3798  }
3799 
3800  ifile->eof_reached = 1;
3801  return AVERROR(EAGAIN);
3802  }
3803 
3804  reset_eagain();
3805 
3806  if (do_pkt_dump) {
3807  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3808  is->streams[pkt.stream_index]);
3809  }
3810  /* the following test is needed in case new streams appear
3811  dynamically in the stream: we ignore them */
3812  if (pkt.stream_index >= ifile->nb_streams) {
3813  report_new_stream(file_index, &pkt);
3814  goto discard_packet;
3815  }
3816 
3817  ist = input_streams[ifile->ist_index + pkt.stream_index];
3818 
3819  ist->data_size += pkt.size;
3820  ist->nb_packets++;
3821 
3822  if (ist->discard)
3823  goto discard_packet;
3824 
3825  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3826  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3827  exit_program(1);
3828  }
3829 
3830  if (debug_ts) {
3831  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3832  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3833  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3834  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3835  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3836  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3837  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3838  av_ts2str(input_files[ist->file_index]->ts_offset),
3839  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3840  }
3841 
3842  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3843  int64_t stime, stime2;
3844  // Correcting starttime based on the enabled streams
3845  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3846  // so we instead do it here as part of discontinuity handling
3847  if ( ist->next_dts == AV_NOPTS_VALUE
3848  && ifile->ts_offset == -is->start_time
3849  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3850  int64_t new_start_time = INT64_MAX;
3851  for (i=0; i<is->nb_streams; i++) {
3852  AVStream *st = is->streams[i];
3853  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3854  continue;
3855  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3856  }
3857  if (new_start_time > is->start_time) {
3858  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3859  ifile->ts_offset = -new_start_time;
3860  }
3861  }
3862 
3863  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3864  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3865  ist->wrap_correction_done = 1;
3866 
3867  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3868  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3869  ist->wrap_correction_done = 0;
3870  }
3871  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3872  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3873  ist->wrap_correction_done = 0;
3874  }
3875  }
3876 
3877  /* add the stream-global side data to the first packet */
3878  if (ist->nb_packets == 1) {
3879  if (ist->st->nb_side_data)
3880  av_packet_split_side_data(&pkt);
3881  for (i = 0; i < ist->st->nb_side_data; i++) {
3882  AVPacketSideData *src_sd = &ist->st->side_data[i];
3883  uint8_t *dst_data;
3884 
3885  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3886  continue;
3887  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3888  continue;
3889 
3890  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3891  if (!dst_data)
3892  exit_program(1);
3893 
3894  memcpy(dst_data, src_sd->data, src_sd->size);
3895  }
3896  }
3897 
3898  if (pkt.dts != AV_NOPTS_VALUE)
3899  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3900  if (pkt.pts != AV_NOPTS_VALUE)
3901  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3902 
3903  if (pkt.pts != AV_NOPTS_VALUE)
3904  pkt.pts *= ist->ts_scale;
3905  if (pkt.dts != AV_NOPTS_VALUE)
3906  pkt.dts *= ist->ts_scale;
3907 
3908  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3909  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3910  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3911  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3912  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3913  int64_t delta = pkt_dts - ifile->last_ts;
3914  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3915  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3916  ifile->ts_offset -= delta;
3917  av_log(NULL, AV_LOG_DEBUG,
3918  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3919  delta, ifile->ts_offset);
3920  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3921  if (pkt.pts != AV_NOPTS_VALUE)
3922  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3923  }
3924  }
3925 
3926  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3927  if (pkt.pts != AV_NOPTS_VALUE) {
3928  pkt.pts += duration;
3929  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3930  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3931  }
3932 
3933  if (pkt.dts != AV_NOPTS_VALUE)
3934  pkt.dts += duration;
3935 
3936  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3937  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3938  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3939  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3940  !copy_ts) {
3941  int64_t delta = pkt_dts - ist->next_dts;
3942  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3943  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3944  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3945  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3946  ifile->ts_offset -= delta;
3947  av_log(NULL, AV_LOG_DEBUG,
3948  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3949  delta, ifile->ts_offset);
3950  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3951  if (pkt.pts != AV_NOPTS_VALUE)
3952  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3953  }
3954  } else {
3955  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3956  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3957  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3958  pkt.dts = AV_NOPTS_VALUE;
3959  }
3960  if (pkt.pts != AV_NOPTS_VALUE){
3961  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3962  delta = pkt_pts - ist->next_dts;
3963  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3964  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3965  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3966  pkt.pts = AV_NOPTS_VALUE;
3967  }
3968  }
3969  }
3970  }
3971 
3972  if (pkt.dts != AV_NOPTS_VALUE)
3973  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3974 
3975  if (debug_ts) {
3976  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3977  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3978  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3979  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3980  av_ts2str(input_files[ist->file_index]->ts_offset),
3981  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3982  }
3983 
3984  sub2video_heartbeat(ist, pkt.pts);
3985 
3986  process_input_packet(ist, &pkt, 0);
3987 
3988 discard_packet:
3989  av_packet_unref(&pkt);
3990 
3991  return 0;
3992 }
3993 
3994 /**
3995  * Perform a step of transcoding for the specified filter graph.
3996  *
3997  * @param[in] graph filter graph to consider
3998  * @param[out] best_ist input stream where a frame would allow to continue
3999  * @return 0 for success, <0 for error
4000  */
4001 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4002 {
4003  int i, ret;
4004  int nb_requests, nb_requests_max = 0;
4005  InputFilter *ifilter;
4006  InputStream *ist;
4007 
4008  *best_ist = NULL;
4009  ret = avfilter_graph_request_oldest(graph->graph);
4010  if (ret >= 0)
4011  return reap_filters(0);
4012 
4013  if (ret == AVERROR_EOF) {
4014  ret = reap_filters(1);
4015  for (i = 0; i < graph->nb_outputs; i++)
4016  close_output_stream(graph->outputs[i]->ost);
4017  return ret;
4018  }
4019  if (ret != AVERROR(EAGAIN))
4020  return ret;
4021 
4022  for (i = 0; i < graph->nb_inputs; i++) {
4023  ifilter = graph->inputs[i];
4024  ist = ifilter->ist;
4025  if (input_files[ist->file_index]->eagain ||
4026  input_files[ist->file_index]->eof_reached)
4027  continue;
4028  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4029  if (nb_requests > nb_requests_max) {
4030  nb_requests_max = nb_requests;
4031  *best_ist = ist;
4032  }
4033  }
4034 
4035  if (!*best_ist)
4036  for (i = 0; i < graph->nb_outputs; i++)
4037  graph->outputs[i]->ost->unavailable = 1;
4038 
4039  return 0;
4040 }
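
Editorial note: when the filtergraph returns EAGAIN, transcode_from_filter() above feeds the input the graph is most starved for, i.e. the one whose buffer source has rejected the most frame requests. A reduced sketch of that selection (helper name and array are hypothetical, not ffmpeg.c code):

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersrc.h"

static int pick_neediest_input(AVFilterContext **buffersrcs, int n)
{
    int i, best = -1;
    unsigned best_requests = 0;
    for (i = 0; i < n; i++) {
        unsigned r = av_buffersrc_get_nb_failed_requests(buffersrcs[i]);
        if (r > best_requests) {
            best_requests = r;
            best = i;
        }
    }
    return best;   /* -1: no input is currently starving the graph */
}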
4041 
4042 /**
4043  * Run a single step of transcoding.
4044  *
4045  * @return 0 for success, <0 for error
4046  */
4047 static int transcode_step(void)
4048 {
4049  OutputStream *ost;
4050  InputStream *ist;
4051  int ret;
4052 
4053  ost = choose_output();
4054  if (!ost) {
4055  if (got_eagain()) {
4056  reset_eagain();
4057  av_usleep(10000);
4058  return 0;
4059  }
4060  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4061  return AVERROR_EOF;
4062  }
4063 
4064  if (ost->filter) {
4065  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4066  return ret;
4067  if (!ist)
4068  return 0;
4069  } else {
4070  av_assert0(ost->source_index >= 0);
4071  ist = input_streams[ost->source_index];
4072  }
4073 
4074  ret = process_input(ist->file_index);
4075  if (ret == AVERROR(EAGAIN)) {
4076  if (input_files[ist->file_index]->eagain)
4077  ost->unavailable = 1;
4078  return 0;
4079  }
4080 
4081  if (ret < 0)
4082  return ret == AVERROR_EOF ? 0 : ret;
4083 
4084  return reap_filters(0);
4085 }
4086 
4087 /*
4088  * The following code is the main loop of the file converter
4089  */
4090 static int transcode(void)
4091 {
4092  int ret, i;
4093  AVFormatContext *os;
4094  OutputStream *ost;
4095  InputStream *ist;
4096  int64_t timer_start;
4097  int64_t total_packets_written = 0;
4098 
4099  ret = transcode_init();
4100  if (ret < 0)
4101  goto fail;
4102 
4103  if (stdin_interaction) {
4104  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4105  }
4106 
4107  timer_start = av_gettime_relative();
4108 
4109 #if HAVE_PTHREADS
4110  if ((ret = init_input_threads()) < 0)
4111  goto fail;
4112 #endif
4113 
4114  while (!received_sigterm) {
4115  int64_t cur_time= av_gettime_relative();
4116 
4117  /* if 'q' pressed, exits */
4118  if (stdin_interaction)
4119  if (check_keyboard_interaction(cur_time) < 0)
4120  break;
4121 
4122  /* check if there's any stream where output is still needed */
4123  if (!need_output()) {
4124  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4125  break;
4126  }
4127 
4128  ret = transcode_step();
4129  if (ret < 0 && ret != AVERROR_EOF) {
4130  char errbuf[128];
4131  av_strerror(ret, errbuf, sizeof(errbuf));
4132 
4133  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4134  break;
4135  }
4136 
4137  /* dump report by using the output first video and audio streams */
4138  print_report(0, timer_start, cur_time);
4139  }
4140 #if HAVE_PTHREADS
4141  free_input_threads();
4142 #endif
4143 
4144  /* at the end of stream, we must flush the decoder buffers */
4145  for (i = 0; i < nb_input_streams; i++) {
4146  ist = input_streams[i];
4147  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4148  process_input_packet(ist, NULL, 0);
4149  }
4150  }
4151  flush_encoders();
4152 
4153  term_exit();
4154 
4155  /* write the trailer if needed and close file */
4156  for (i = 0; i < nb_output_files; i++) {
4157  os = output_files[i]->ctx;
4158  if ((ret = av_write_trailer(os)) < 0) {
4159  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4160  if (exit_on_error)
4161  exit_program(1);
4162  }
4163  }
4164 
4165  /* dump report by using the first video and audio streams */
4166  print_report(1, timer_start, av_gettime_relative());
4167 
4168  /* close each encoder */
4169  for (i = 0; i < nb_output_streams; i++) {
4170  ost = output_streams[i];
4171  if (ost->encoding_needed) {
4172  av_freep(&ost->enc_ctx->stats_in);
4173  }
4174  total_packets_written += ost->packets_written;
4175  }
4176 
4177  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4178  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4179  exit_program(1);
4180  }
4181 
4182  /* close each decoder */
4183  for (i = 0; i < nb_input_streams; i++) {
4184  ist = input_streams[i];
4185  if (ist->decoding_needed) {
4186  avcodec_close(ist->dec_ctx);
4187  if (ist->hwaccel_uninit)
4188  ist->hwaccel_uninit(ist->dec_ctx);
4189  }
4190  }
4191 
4192  /* finished ! */
4193  ret = 0;
4194 
4195  fail:
4196 #if HAVE_PTHREADS
4197  free_input_threads();
4198 #endif
4199 
4200  if (output_streams) {
4201  for (i = 0; i < nb_output_streams; i++) {
4202  ost = output_streams[i];
4203  if (ost) {
4204  if (ost->logfile) {
4205  if (fclose(ost->logfile))
4206  av_log(NULL, AV_LOG_ERROR,
4207  "Error closing logfile, loss of information possible: %s\n",
4208  av_err2str(AVERROR(errno)));
4209  ost->logfile = NULL;
4210  }
4211  av_freep(&ost->forced_kf_pts);
4212  av_freep(&ost->apad);
4213  av_freep(&ost->disposition);
4214  av_dict_free(&ost->encoder_opts);
4215  av_dict_free(&ost->sws_dict);
4216  av_dict_free(&ost->swr_opts);
4217  av_dict_free(&ost->resample_opts);
4218  }
4219  }
4220  }
4221  return ret;
4222 }
4223 
4224 
4225 static int64_t getutime(void)
4226 {
4227 #if HAVE_GETRUSAGE
4228  struct rusage rusage;
4229 
4230  getrusage(RUSAGE_SELF, &rusage);
4231  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4232 #elif HAVE_GETPROCESSTIMES
4233  HANDLE proc;
4234  FILETIME c, e, k, u;
4235  proc = GetCurrentProcess();
4236  GetProcessTimes(proc, &c, &e, &k, &u);
4237  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4238 #else
4239  return av_gettime_relative();
4240 #endif
4241 }
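
Editorial note: getutime() reports user CPU time in microseconds on every platform. A quick unit check (standalone sketch): a Windows FILETIME counts 100 ns ticks, so dividing by 10 yields the same microsecond scale as tv_sec * 1000000 + tv_usec from getrusage().

#include <assert.h>
#include <stdint.h>

static void getutime_unit_check(void)
{
    int64_t filetime_ticks = 10000000;               /* 1 s of CPU time in 100 ns units */
    int64_t usec_from_filetime = filetime_ticks / 10;

    int64_t tv_sec = 1, tv_usec = 0;                 /* the same 1 s from struct rusage */
    int64_t usec_from_rusage = tv_sec * 1000000LL + tv_usec;

    assert(usec_from_filetime == 1000000 && usec_from_filetime == usec_from_rusage);
}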
4242 
4243 static int64_t getmaxrss(void)
4244 {
4245 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4246  struct rusage rusage;
4247  getrusage(RUSAGE_SELF, &rusage);
4248  return (int64_t)rusage.ru_maxrss * 1024;
4249 #elif HAVE_GETPROCESSMEMORYINFO
4250  HANDLE proc;
4251  PROCESS_MEMORY_COUNTERS memcounters;
4252  proc = GetCurrentProcess();
4253  memcounters.cb = sizeof(memcounters);
4254  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4255  return memcounters.PeakPagefileUsage;
4256 #else
4257  return 0;
4258 #endif
4259 }
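
Editorial note: getmaxrss() normalizes peak memory to bytes: on Linux, getrusage() reports ru_maxrss in kilobytes, hence the * 1024, while the Windows branch already returns a byte count. Minimal illustration (standalone sketch):

#include <stdint.h>

static int64_t maxrss_kb_to_bytes(long ru_maxrss_kb)
{
    /* e.g. 524288 kB -> 536870912 bytes (512 MiB) */
    return (int64_t)ru_maxrss_kb * 1024;
}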
4260 
4261 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4262 {
4263 }
4264 
4265 int main(int argc, char **argv)
4266 {
4267  int ret;
4268  int64_t ti;
4269 
4270  register_exit(ffmpeg_cleanup);
4271 
4272  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4273 
4274  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4275  parse_loglevel(argc, argv, options);
4276 
4277  if(argc>1 && !strcmp(argv[1], "-d")){
4278  run_as_daemon=1;
4279  av_log_set_callback(log_callback_null);
4280  argc--;
4281  argv++;
4282  }
4283 
4284  avcodec_register_all();
4285 #if CONFIG_AVDEVICE
4286  avdevice_register_all();
4287 #endif
4288  avfilter_register_all();
4289  av_register_all();
4290  avformat_network_init();
4291 
4292  show_banner(argc, argv, options);
4293 
4294  term_init();
4295 
4296  /* parse options and open all input/output files */
4297  ret = ffmpeg_parse_options(argc, argv);
4298  if (ret < 0)
4299  exit_program(1);
4300 
4301  if (nb_output_files <= 0 && nb_input_files == 0) {
4302  show_usage();
4303  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4304  exit_program(1);
4305  }
4306 
4307  /* file converter / grab */
4308  if (nb_output_files <= 0) {
4309  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4310  exit_program(1);
4311  }
4312 
4313 // if (nb_input_files == 0) {
4314 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4315 // exit_program(1);
4316 // }
4317 
4318  current_time = ti = getutime();
4319  if (transcode() < 0)
4320  exit_program(1);
4321  ti = getutime() - ti;
4322  if (do_benchmark) {
4323  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4324  }
4325  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4326  decode_error_stat[0], decode_error_stat[1]);
4327  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4328  exit_program(69);
4329 
4330  exit_program(received_nb_signals ? 255 : main_return_code);
4331  return main_return_code;
4332 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1517
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:275
#define extra_bits(eb)
Definition: intrax8.c:152
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:796
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2786
int got_output
Definition: ffmpeg.h:304
#define AV_DISPOSITION_METADATA
Definition: avformat.h:861
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1786
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1053
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1899
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:403
const struct AVCodec * codec
Definition: avcodec.h:1541
Definition: ffmpeg.h:380
AVRational framerate
Definition: avcodec.h:3212
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:330
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:493
void term_init(void)
Definition: ffmpeg.c:366
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:287
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:234
int nb_outputs
Definition: ffmpeg.h:251
AVDictionary * swr_opts
Definition: ffmpeg.h:455
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:261
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2157
int resample_channels
Definition: ffmpeg.h:299
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:181
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:308
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2610
int stream_copy
Definition: ffmpeg.h:460
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1016
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3699
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1566
AVOption.
Definition: opt.h:245
AVRational frame_rate
Definition: ffmpeg.h:425
int64_t * forced_kf_pts
Definition: ffmpeg.h:434
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:288
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2862
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:450
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:775
#define CODEC_FLAG_PASS2
Definition: avcodec.h:977
static int process_input(int file_index)
Definition: ffmpeg.c:3753
int exit_on_error
Definition: ffmpeg_opt.c:108
AVFormatContext * ctx
Definition: movenc-test.c:48
const char * fmt
Definition: avisynth_c.h:632
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2579
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1597
#define LIBAVUTIL_VERSION_INT
Definition: version.h:70
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1487
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
static int run_as_daemon
Definition: ffmpeg.c:127
Memory buffer source API.
void av_log_set_level(int level)
Set the log level.
Definition: log.c:382
AVRational framerate
Definition: ffmpeg.h:288
AVCodecParserContext * parser
Definition: ffmpeg.h:468
static int64_t cur_time
Definition: ffserver.c:262
int64_t max_pts
Definition: ffmpeg.h:281
int decoding_needed
Definition: ffmpeg.h:259
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:949
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1495
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:429
int index
stream index in AVFormatContext
Definition: avformat.h:878
int size
Definition: avcodec.h:1468
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4243
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:128
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2567
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:538
#define AV_DISPOSITION_DUB
Definition: avformat.h:831
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1935
int eagain
Definition: ffmpeg.h:351
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Immediately push the frame to the output.
Definition: buffersrc.h:48
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1133
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1752
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:413
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:604
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:843
int quality
Definition: ffmpeg.h:480
unsigned num_rects
Definition: avcodec.h:3737
AVFrame * filter_frame
Definition: ffmpeg.h:266
static int transcode_init(void)
Definition: ffmpeg.c:2789
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2574
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2267
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2700
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:101
enum AVMediaType type
Definition: avcodec.h:3405
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:845
int last_dropped
Definition: ffmpeg.h:419
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:657
discard all
Definition: avcodec.h:688
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:989
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:333
int64_t input_ts_offset
Definition: ffmpeg.h:357
int do_hex_dump
Definition: ffmpeg_opt.c:102
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1707
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2924
int nb_input_streams
Definition: ffmpeg.c:138
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:71
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2416
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2733
Picture data structure.
Definition: avcodec.h:3666
int profile
profile
Definition: avcodec.h:3028
uint64_t packets_written
Definition: ffmpeg.h:474
AVCodec.
Definition: avcodec.h:3392
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:182
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2324
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:495
int print_stats
Definition: ffmpeg_opt.c:110
attribute_deprecated AVPicture pict
Definition: avcodec.h:3710
float dts_error_threshold
Definition: ffmpeg_opt.c:93
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:494
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
uint64_t data_size
Definition: ffmpeg.h:472
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:452
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:835
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1661
Undefined.
Definition: avutil.h:265
AVSubtitleRect ** rects
Definition: avcodec.h:3738
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2352
int encoding_needed
Definition: ffmpeg.h:402
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:609
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4261
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3417
Format I/O context.
Definition: avformat.h:1314
uint64_t samples_decoded
Definition: ffmpeg.h:345
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:225
unsigned int nb_stream_indexes
Definition: avformat.h:1252
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:73
int64_t cur_dts
Definition: avformat.h:1054
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3701
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:881
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:312
uint64_t frames_decoded
Definition: ffmpeg.h:344
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:245
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:916
char * logfile_prefix
Definition: ffmpeg.h:445
static uint8_t * subtitle_out
Definition: ffmpeg.c:135
static int main_return_code
Definition: ffmpeg.c:318
static int64_t start_time
Definition: ffplay.c:330
int copy_initial_nonkeyframes
Definition: ffmpeg.h:462
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:112
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:542
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:141
Opaque data information usually continuous.
Definition: avutil.h:195
AVDictionary * sws_dict
Definition: ffmpeg.h:454
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:198
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:212
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3150
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:675
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int stdin_interaction
Definition: ffmpeg_opt.c:112
FILE * logfile
Definition: ffmpeg.h:446
AVDictionary * opts
Definition: ffmpeg.h:491
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:565
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1485
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1304
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
attribute_deprecated int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3670
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2463
static int need_output(void)
Definition: ffmpeg.c:3353
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:375
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:262
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:993
static double psnr(double d)
Definition: ffmpeg.c:1221
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1647
int do_benchmark
Definition: ffmpeg_opt.c:100
int audio_sync_method
Definition: ffmpeg_opt.c:96
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:82
int shortest
Definition: ffmpeg.h:497
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1382
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:1848
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4225
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:364
uint8_t * data
Definition: avcodec.h:1467
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
static void set_tty_echo(int on)
Definition: ffmpeg.c:3405
AVDictionary * resample_opts
Definition: ffmpeg.h:456
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2670
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:231
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:3685
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4254
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:140
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:401
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1283
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:317
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1226
int resample_sample_rate
Definition: ffmpeg.h:298
uint8_t * data
Definition: avcodec.h:1411
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:312
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:488
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:289
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3702
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:442
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2917
AVCodec * dec
Definition: ffmpeg.h:264
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1250
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2692
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:182
int top_field_first
Definition: ffmpeg.h:289
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1442
int nb_output_streams
Definition: ffmpeg.c:143
int file_index
Definition: ffmpeg.h:255
const OptionDef options[]
Definition: ffserver.c:3962
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5142
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2269
unsigned int * stream_index
Definition: avformat.h:1251
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:295
int resample_height
Definition: ffmpeg.h:293
int wrap_correction_done
Definition: ffmpeg.h:276
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:278
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:259
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:859
unsigned m
Definition: audioconvert.c:187
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1333
int64_t next_dts
Definition: ffmpeg.h:271
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1499
attribute_deprecated int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Definition: avpicture.c:37
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:480
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2529
static volatile int transcode_init_done
Definition: ffmpeg.c:316
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3406
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3453
int rate_emu
Definition: ffmpeg.h:367
int width
width and height of the video frame
Definition: frame.h:230
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1846
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1264
static void reset_eagain(void)
Definition: ffmpeg.c:3656
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:635
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2091
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:335
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1562
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:602
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3029
FilterGraph ** filtergraphs
Definition: ffmpeg.c:147
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:457
AVFilterContext * filter
Definition: ffmpeg.h:224
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:332
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:487
int64_t start
Definition: ffmpeg.h:268
int loop
Definition: ffmpeg.h:353
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3700
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:342
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:412
int video_sync_method
Definition: ffmpeg_opt.c:97
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:130
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:176
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:154
char * sdp_filename
Definition: ffmpeg_opt.c:89
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:420
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2196
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: avcodec.h:3411
int initial_padding
Audio only.
Definition: avcodec.h:3204
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:129
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1467
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:420
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1627
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:486
int av_frame_get_decode_error_flags(const AVFrame *frame)
AVChapter ** chapters
Definition: avformat.h:1518
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:75
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1413
int av_log_get_level(void)
Get the current log level.
Definition: log.c:377
const char * name
Name of the codec implementation.
Definition: avcodec.h:3399
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:770
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:72
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:586
int force_fps
Definition: ffmpeg.h:427
int qsv_transcode_init(OutputStream *ost)
Definition: ffmpeg_qsv.c:183
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:960
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1247
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
int qp_hist
Definition: ffmpeg_opt.c:111
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:80
float frame_drop_threshold
Definition: ffmpeg_opt.c:98
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:935
int64_t error[4]
Definition: ffmpeg.h:486
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1473
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2900
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2338
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:3736
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3739
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:896
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2500
OutputFilter * filter
Definition: ffmpeg.h:448
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:1919
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:343
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:431
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:834
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1497
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:842
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1370
static int nb_frames_drop
Definition: ffmpeg.c:129
A bitmap, pict will be set.
Definition: avcodec.h:3681
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
int nb_output_files
Definition: ffmpeg.c:145
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:207
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:202
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:426
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:252
static int transcode(void)
Definition: ffmpeg.c:4090
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:787
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:398
char filename[1024]
input or output filename
Definition: avformat.h:1390
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:246
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:451
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3355
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int saw_first_ts
Definition: ffmpeg.h:285
int abort_on_flags
Definition: ffmpeg_opt.c:109
This side data contains quality related information from the encoder.
Definition: avcodec.h:1328
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1935
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:232
int width
picture width / height.
Definition: avcodec.h:1711
PVOID HANDLE
char * apad
Definition: ffmpeg.h:457
int64_t nb_samples
Definition: ffmpeg.h:282
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:439
int64_t duration
Definition: ffmpeg.h:354
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:390
const char * name
Definition: avformat.h:523
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:233
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:766
int64_t duration
Definition: movenc-test.c:63
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:750
attribute_deprecated uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3668
int nb_filtergraphs
Definition: ffmpeg.c:148
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:88
int64_t last_ts
Definition: ffmpeg.h:360
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:288
int do_pkt_dump
Definition: ffmpeg_opt.c:103
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2185
int64_t max_frames
Definition: ffmpeg.h:416
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:334
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:300
int audio_channels_mapped
Definition: ffmpeg.h:443
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:951
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1670
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:577
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:192
Opaque data information usually sparse.
Definition: avutil.h:197
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define src
Definition: vp9dsp.c:530
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3647
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:106
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:225
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
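For example, a small sketch that snaps a requested frame rate to a codec's supported_framerates list; the helper name is illustrative and the list is assumed to be {0,0}-terminated as documented for AVCodec.supported_framerates:

#include "libavutil/rational.h"

/* Sketch: pick the supported frame rate closest to the requested one. */
static AVRational nearest_framerate(AVRational requested, const AVRational *supported)
{
    int idx = av_find_nearest_q_idx(requested, supported);
    return supported[idx];
}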
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:2648
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:117
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:395
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visually impaired audiences
Definition: avformat.h:844
int ret
Definition: ffmpeg.h:305
Keep a reference to the frame.
Definition: buffersrc.h:55
FILE * out
Definition: movenc-test.c:54
int audio_volume
Definition: ffmpeg_opt.c:95
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
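A brief sketch combining this with av_get_default_channel_layout(); the helper name and buffer handling are illustrative:

#include "libavutil/channel_layout.h"

/* Sketch: describe the default layout for a channel count, e.g. "stereo" for 2. */
static void describe_channels(int nb_channels, char *buf, int buf_size)
{
    int64_t layout = av_get_default_channel_layout(nb_channels);
    av_get_channel_layout_string(buf, buf_size, nb_channels, layout);
}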
Stream structure.
Definition: avformat.h:877
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:2915
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:485
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:242
InputFilter ** filters
Definition: ffmpeg.h:320
int fix_sub_duration
Definition: ffmpeg.h:302
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:363
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4267
Definition: ffmpeg.h:70
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2307
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:74
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:830
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:166
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2748
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int frame_size
Definition: mxfenc.c:1821
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:51
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:860
int ost_index
Definition: ffmpeg.h:492
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:720
enum AVMediaType codec_type
Definition: avcodec.h:1540
double ts_scale
Definition: ffmpeg.h:284
int unavailable
Definition: ffmpeg.h:459
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3412
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2520
enum AVCodecID codec_id
Definition: avcodec.h:1549
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:317
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1492
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:181
float max_error_rate
Definition: ffmpeg_opt.c:114
int sample_rate
samples per second
Definition: avcodec.h:2287
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:209
uint64_t frames_encoded
Definition: ffmpeg.h:476
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2060
AVIOContext * pb
I/O context.
Definition: avformat.h:1356
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int ist_index
Definition: ffmpeg.h:352
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:548
static int loop
Definition: ffplay.c:339
int debug
debug
Definition: avcodec.h:2763
static void print_sdp(void)
Definition: ffmpeg.c:2424
const char * graph_desc
Definition: ffmpeg.h:243
int guess_layout_max
Definition: ffmpeg.h:290
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:81
int64_t start_time
Definition: ffmpeg.h:361
main external API structure.
Definition: avcodec.h:1532
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:545
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:307
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:459
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:767
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2510
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:257
const char * attachment_filename
Definition: ffmpeg.h:461
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1564
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1769
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
AVRational time_base
Definition: ffmpeg.h:356
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:595
AVCodecContext * enc_ctx
Definition: ffmpeg.h:414
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:265
GLint GLenum type
Definition: opengl_enc.c:105
int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, AVBitStreamFilterContext *bsfc)
Apply a list of bitstream filters to a packet.
Definition: utils.c:4687
int extradata_size
Definition: avcodec.h:1648
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
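A self-contained sketch of building, iterating and freeing a dictionary with this call; the option keys shown ("threads", "preset") are purely illustrative:

#include <stdio.h>
#include "libavutil/dict.h"

/* Sketch: set entries (key and value are copied), walk all entries, then free. */
static void dict_example(void)
{
    AVDictionary *opts = NULL;
    AVDictionaryEntry *e = NULL;

    av_dict_set(&opts, "threads", "auto", 0);
    av_dict_set(&opts, "preset",  "fast", 0);
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);
    av_dict_free(&opts);
}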
Replacements for frequently missing libm functions.
AVCodecContext * dec_ctx
Definition: ffmpeg.h:263
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4001
int nb_coded_side_data
Definition: avcodec.h:3356
AVStream * st
Definition: ffmpeg.h:256
int * audio_channels_map
Definition: ffmpeg.h:442
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:338
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1802
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:142
int index
Definition: gxfenc.c:89
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: avcodec.h:906
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2775
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:398
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:388
static int current_time
Definition: ffmpeg.c:132
int64_t sync_opts
Definition: ffmpeg.h:407
char * vstats_filename
Definition: ffmpeg_opt.c:88
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:146
char * disposition
Definition: ffmpeg.h:464
struct InputStream::@24 prev_sub
#define mid_pred
Definition: mathops.h:95
AVMediaType
Definition: avutil.h:191
discard useless packets like 0 size packets in avi
Definition: avcodec.h:683
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize an AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:950
int nb_streams_warn
Definition: ffmpeg.h:366
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2442
AVDictionary * decoder_opts
Definition: ffmpeg.h:287
int autorotate
Definition: ffmpeg.h:292
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:569
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1170
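A hedged sketch of the usual find/alloc/open sequence around this call; error handling is reduced to the minimum and the helper name is illustrative:

#include "libavcodec/avcodec.h"

/* Sketch: find and open a decoder for a given codec id. */
static AVCodecContext *open_decoder(enum AVCodecID id)
{
    AVCodec *codec = avcodec_find_decoder(id);
    AVCodecContext *ctx;

    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}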
int showed_multi_packet_warning
Definition: ffmpeg.h:286
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:113
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3741
int64_t ts_offset
Definition: ffmpeg.h:359
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:206
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:832
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4047
char * filters_script
filtergraph script associated with the -filter_script option
Definition: ffmpeg.h:451
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3024
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1509
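A sketch of the standard demux loop, pairing av_read_frame() with av_packet_unref(); the per-stream counting is only an illustration and counts is assumed to hold at least ic->nb_streams entries:

#include "libavformat/avformat.h"

/* Sketch: read every packet from an opened input and count packets per stream. */
static void count_packets(AVFormatContext *ic, int *counts)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        counts[pkt.stream_index]++;
        av_packet_unref(&pkt);   /* the demuxer allocates; the caller releases */
    }
}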
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:3666
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:271
This struct describes the properties of a single codec described by an AVCodecID.
Definition: avcodec.h:561
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:267
AVFrame * filtered_frame
Definition: ffmpeg.h:417
int source_index
Definition: ffmpeg.h:400
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:271
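A sketch of allocating a writable video frame with this call; the 640x480 YUV420P parameters and the 32-byte alignment are illustrative choices:

#include "libavutil/frame.h"

/* Sketch: allocate a frame and its data buffers for a fixed size and format. */
static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();

    if (!frame)
        return NULL;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 640;
    frame->height = 480;
    if (av_frame_get_buffer(frame, 32) < 0) {   /* 32-byte alignment */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}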
static volatile int received_nb_signals
Definition: ffmpeg.c:315
int copy_prior_start
Definition: ffmpeg.h:463
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:474
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1613
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:626
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:321
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2472
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1399
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:192
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:438
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:272
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2235
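A sketch pairing av_seek_frame() with avcodec_flush_buffers(), since buffered decoder state must be dropped after a seek; stream_index -1 and AVSEEK_FLAG_BACKWARD are illustrative choices, and the helper name is hypothetical:

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"

/* Sketch: seek to ts (AV_TIME_BASE units, since stream_index is -1) and reset the decoder. */
static int seek_and_flush(AVFormatContext *ic, AVCodecContext *dec, int64_t ts)
{
    int ret = av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
    if (ret >= 0)
        avcodec_flush_buffers(dec);   /* drop frames buffered before the seek */
    return ret;
}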
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:105
int resample_sample_fmt
Definition: ffmpeg.h:297
int forced_kf_count
Definition: ffmpeg.h:435
int64_t start
Definition: avformat.h:1280
OSTFinished finished
Definition: ffmpeg.h:458
char * forced_keyframes
Definition: ffmpeg.h:437
uint64_t data_size
Definition: ffmpeg.h:340
int resample_width
Definition: ffmpeg.h:294
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:274
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error code err.
Definition: cmdutils.c:1034
struct FilterGraph * graph
Definition: ffmpeg.h:233
uint64_t limit_filesize
Definition: ffmpeg.h:495
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1378
AVIOContext * progress_avio
Definition: ffmpeg.c:133
if(ret< 0)
Definition: vf_mcdeint.c:282
int main(int argc, char **argv)
Definition: ffmpeg.c:4265
int reinit_filters
Definition: ffmpeg.h:323
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:477
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:263
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:927
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:929
static double c[64]
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:940
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3735
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1043
uint64_t samples_encoded
Definition: ffmpeg.h:477
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1279
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:208
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
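A small sketch of the common pattern before an encode call; note that av_init_packet() does not touch data/size, so they are cleared explicitly here (the helper name is illustrative):

#include "libavcodec/avcodec.h"

/* Sketch: prepare an empty packet that an encoder will fill and allocate. */
static void prepare_packet(AVPacket *pkt)
{
    av_init_packet(pkt);   /* sets pts/dts to AV_NOPTS_VALUE, clears flags, etc. */
    pkt->data = NULL;      /* left untouched by av_init_packet(), so clear them */
    pkt->size = 0;
}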
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2983
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:938
char * key
Definition: dict.h:87
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:80
AVFrame * last_frame
Definition: ffmpeg.h:418
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:93
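A hedged sketch of feeding a frame into a configured filtergraph and draining the buffersink; the helper name and the EAGAIN/EOF handling are illustrative, and src/sink are assumed to be already set up buffer source and sink contexts:

#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"

/* Sketch: push one frame, then pull whatever the sink can deliver. */
static int filter_frame(AVFilterContext *src, AVFilterContext *sink,
                        AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame_flags(src, in, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame(sink, out)) >= 0) {
        /* ... use the filtered frame ... */
        av_frame_unref(out);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}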
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1500
int copy_ts
Definition: ffmpeg_opt.c:104
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1326
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3777
AVFormatContext * ctx
Definition: ffmpeg.h:349
int pict_type
Definition: ffmpeg.h:483
AVCodec * enc
Definition: ffmpeg.h:415
AVSubtitle subtitle
Definition: ffmpeg.h:306
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:635
int eof_reached
Definition: ffmpeg.h:350
Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE.
Definition: mathematics.h:76
int forced_kf_index
Definition: ffmpeg.h:436
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:783
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:488
char * avfilter
Definition: ffmpeg.h:449
uint8_t * name
Definition: ffmpeg.h:227
char * value
Definition: dict.h:88
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:317
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:92
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:712
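A sketch of the parse/evaluate/free cycle from libavutil/eval.h; the expression string and the constant names "t" and "n" are illustrative, not the exact forced_keyframes constant set:

#include "libavutil/eval.h"

/* Sketch: parse an expression over constants "t" and "n", evaluate it once, free it. */
static double eval_example(double t, double n)
{
    static const char *const names[] = { "t", "n", NULL };
    double vals[] = { t, n };         /* same order as names[] */
    double res = 0.0;
    AVExpr *e = NULL;

    if (av_expr_parse(&e, "gte(t, n*2)", names,
                      NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 0.0;
    res = av_expr_eval(e, vals, NULL);
    av_expr_free(e);
    return res;
}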
int channels
number of audio channels
Definition: avcodec.h:2288
int top_field_first
Definition: ffmpeg.h:428
OutputFilter ** outputs
Definition: ffmpeg.h:250
InputFile ** input_files
Definition: ffmpeg.c:139
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2510
void av_log_set_flags(int arg)
Definition: log.c:387
AVDictionary * opts
Definition: movenc-test.c:50
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:247
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:219
AVFormatContext * ctx
Definition: ffmpeg.h:490
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:833
void show_usage(void)
Definition: ffmpeg_opt.c:2979
An instance of a filter.
Definition: avfilter.h:304
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:327
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1466
AVDictionary * encoder_opts
Definition: ffmpeg.h:453
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1083
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4439
int height
Definition: frame.h:230
InputFilter ** inputs
Definition: ffmpeg.h:248
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2284
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:336
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:661
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:320
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2255
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2785
OutputFile ** output_files
Definition: ffmpeg.c:144
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1687
int copy_tb
Definition: ffmpeg_opt.c:106
int64_t min_pts
Definition: ffmpeg.h:280
static volatile int received_sigterm
Definition: ffmpeg.c:314
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:257
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3627
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2078
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:299
int stream_index
Definition: avcodec.h:1469
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:919
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:326
enum AVSubtitleType type
Definition: avcodec.h:3719
int64_t first_pts
Definition: ffmpeg.h:410
int nb_inputs
Definition: ffmpeg.h:249
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:942
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:208
#define DECODING_FOR_OST
Definition: ffmpeg.h:260
int index
Definition: ffmpeg.h:399
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1096
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
uint64_t resample_channel_layout
Definition: ffmpeg.h:300
OSTFinished
Definition: ffmpeg.h:392
This structure stores compressed data.
Definition: avcodec.h:1444
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL.
Definition: aviobuf.c:981
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:1927
int delay
Codec delay.
Definition: avcodec.h:1694
int debug_ts
Definition: ffmpeg_opt.c:107
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3383
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:235
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:252
static void sigterm_handler(int sig)
Definition: ffmpeg.c:321
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1460
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:121
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1589
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1468
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:832
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:85
InputStream ** input_streams
Definition: ffmpeg.c:137
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2523
Definition: ffmpeg.h:384
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:759
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3149